Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/include/asm/entry.h | 4
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h | 2
-rw-r--r--  arch/arc/include/asm/pgtable.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/elf.h | 11
-rw-r--r--  arch/arc/kernel/arcksyms.c | 2
-rw-r--r--  arch/arc/kernel/process.c | 2
-rw-r--r--  arch/arc/kernel/setup.c | 6
-rw-r--r--  arch/arc/mm/cache.c | 9
-rw-r--r--  arch/arc/mm/highmem.c | 1
-rw-r--r--  arch/arm/kernel/entry-armv.S | 1
-rw-r--r--  arch/arm/kvm/mmu.c | 2
-rw-r--r--  arch/arm/mach-imx/gpc.c | 6
-rw-r--r--  arch/arm/mm/mmu.c | 21
-rw-r--r--  arch/arm/xen/enlighten.c | 2
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/sleep.S | 10
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 2
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 10
-rw-r--r--  arch/arm64/mm/dump.c | 6
-rw-r--r--  arch/arm64/mm/numa.c | 2
-rw-r--r--  arch/mips/include/asm/page.h | 4
-rw-r--r--  arch/mips/kvm/mmu.c | 2
-rw-r--r--  arch/parisc/include/uapi/asm/errno.h | 4
-rw-r--r--  arch/parisc/kernel/processor.c | 8
-rw-r--r--  arch/parisc/kernel/time.c | 12
-rw-r--r--  arch/s390/boot/compressed/head.S | 11
-rw-r--r--  arch/s390/configs/default_defconfig | 2
-rw-r--r--  arch/s390/configs/gcov_defconfig | 2
-rw-r--r--  arch/s390/configs/performance_defconfig | 2
-rw-r--r--  arch/s390/crypto/crc32-vx.c | 6
-rw-r--r--  arch/s390/defconfig | 2
-rw-r--r--  arch/s390/kernel/head.S | 4
-rw-r--r--  arch/s390/kernel/setup.c | 6
-rw-r--r--  arch/s390/lib/string.c | 16
-rw-r--r--  arch/s390/mm/pageattr.c | 2
-rw-r--r--  arch/um/include/asm/common.lds.S | 2
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb.c | 4
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 7
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb.c | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 3
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 9
-rw-r--r--  arch/x86/kernel/smpboot.c | 25
-rw-r--r--  arch/x86/kvm/vmx.c | 136
-rw-r--r--  arch/x86/mm/kaslr.c | 2
-rw-r--r--  arch/x86/pci/vmd.c | 10
-rw-r--r--  arch/x86/power/hibernate_64.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 2
47 files changed, 224 insertions(+), 169 deletions(-)
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index ad7860c5ce15..51597f344a62 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
- ld.as r12, [r12, PT_user_r25]
+ ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
@@ -198,7 +198,7 @@
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
- st.as r12, [sp, PT_user_r25]
+ st r12, [sp, PT_user_r25]
#endif
.endm
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index c1d36458bfb7..4c6eed80cd8b 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
- TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 0f92d97432a2..89eeb3720051 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
index 0f99ac8fcbb2..0037a587320d 100644
--- a/arch/arc/include/uapi/asm/elf.h
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -13,8 +13,15 @@
/* Machine specific ELF Hdr flags */
#define EF_ARC_OSABI_MSK 0x00000f00
-#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
+#endif
typedef unsigned long elf_greg_t;
typedef unsigned long elf_fpregset_t;
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
index 4d9e77724bed..000dd041ab42 100644
--- a/arch/arc/kernel/arcksyms.c
+++ b/arch/arc/kernel/arcksyms.c
@@ -28,6 +28,7 @@ extern void __muldf3(void);
extern void __divdf3(void);
extern void __floatunsidf(void);
extern void __floatunsisf(void);
+extern void __udivdi3(void);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
EXPORT_SYMBOL(__divdf3);
EXPORT_SYMBOL(__floatunsidf);
EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
/* ARC optimised assembler routines */
EXPORT_SYMBOL(memset);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index b5db9e7fd649..be1972bd2729 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
}
eflags = x->e_flags;
- if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
pr_err("ABI mismatch - you need newer toolchain\n");
force_sigsegv(SIGSEGV, current);
return 0;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a946400a86d0..f52a0d0dc462 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
- n += scnprintf(buf + n, len - n,
- "OS ABI [v3]\t: no-legacy-syscalls\n");
+ n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+ EF_ARC_OSABI_CURRENT >> 8,
+ EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+ "no-legacy-syscalls" : "64-bit data any register aligned");
return buf;
}
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 5a294b2c3cb3..0b10efe3a6a7 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -921,6 +921,15 @@ void arc_cache_init(void)
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+	 *       any geometry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (cpu)
+ return;
+
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 04f83322c9fd..77ff64a874a1 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
void *kmap_atomic(struct page *page)
{
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bc5f50799d75..9f157e7c51e7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -295,6 +295,7 @@ __und_svc_fault:
bl __und_fault
__und_svc_finish:
+ get_thread_info tsk
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
svc_exit r5 @ return from exception
UNWIND(.fnend )
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index bda27b6b1aa2..29d0b23af2a9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
- if (is_error_pfn(pfn))
+ if (is_error_noslot_pfn(pfn))
return -EFAULT;
if (kvm_is_device_pfn(pfn)) {
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index fd8720532471..0df062d8b2c9 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
+ /*
+ * Clear the OF_POPULATED flag set in of_irq_init so that
+ * later the GPC power domain driver will not be skipped.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
+
return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 62f4d01941f7..6344913f0804 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
{
void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
- BUG_ON(!ptr);
+ if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ BUG();
return ptr;
}
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
{
phys_addr_t memblock_limit = 0;
int highmem = 0;
- phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+ u64 vmalloc_limit;
struct memblock_region *reg;
bool should_use_highmem = false;
+ /*
+ * Let's use our own (unoptimized) equivalent of __pa() that is
+ * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
+ * The result is used as the upper bound on physical memory address
+ * and may itself be outside the valid range for which phys_addr_t
+ * and therefore __pa() is defined.
+ */
+ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
if (reg->size > size_limit) {
phys_addr_t overlap_size = reg->size - size_limit;
- pr_notice("Truncating RAM at %pa-%pa to -%pa",
- &block_start, &block_end, &vmalloc_limit);
- memblock_remove(vmalloc_limit, overlap_size);
+ pr_notice("Truncating RAM at %pa-%pa",
+ &block_start, &block_end);
block_end = vmalloc_limit;
+ pr_cont(" to -%pa", &block_end);
+ memblock_remove(vmalloc_limit, overlap_size);
should_use_highmem = true;
}
}
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index b0b82f5ea338..3d2cef6488ea 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;
/* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
/* These are unused until we support booting "pre-ballooned" */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b77f58355da1..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
isb
bl __create_page_tables // recreate kernel mapping
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
+
msr sctlr_el1, x19 // re-enable the MMU
isb
ic iallu // flush instructions fetched
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97ac09..ccf79d849e0a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */
adr_l lr, __enable_mmu /* __cpu_setup will return here */
- ldr x27, =_cpu_resume /* __enable_mmu will branch here */
+ adr_l x27, _resume_switched /* __enable_mmu will branch here */
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
b __cpu_setup
ENDPROC(cpu_resume)
+ .pushsection ".idmap.text", "ax"
+_resume_switched:
+ ldr x8, =_cpu_resume
+ br x8
+ENDPROC(_resume_switched)
+ .ltorg
+ .popsection
+
ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
/*
* We must restore the 32-bit state before the sysregs, thanks
- * to Cortex-A57 erratum #852523.
+ * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_guest_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
* Debug handling: We do trap most, if not all debug related system
* registers. The implementation is good enough to ensure that a guest
* can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
/* ICC_SRE */
- { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index f94b80eb295d..9c3e75df2180 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
- pte_t *pte = pte_offset_kernel(pmd, 0);
+ pte_t *pte = pte_offset_kernel(pmd, 0UL);
unsigned long addr;
unsigned i;
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
- pmd_t *pmd = pmd_offset(pud, 0);
+ pmd_t *pmd = pmd_offset(pud, 0UL);
unsigned long addr;
unsigned i;
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(pgd, 0UL);
unsigned long addr;
unsigned i;
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index c7fe3ec70774..5bb15eab6f00 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <asm/acpi.h>
+
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
nodemask_t numa_nodes_parsed __initdata;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ea0cd9773914..5f987598054f 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
*/
static inline unsigned long ___pa(unsigned long x)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/*
* For MIPS64 the virtual address may either be in one of
* the compatibility segements ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
}
- if (!config_enabled(CONFIG_EVA)) {
+ if (!IS_ENABLED(CONFIG_EVA)) {
/*
* We're using the standard MIPS32 legacy memory map, ie.
* the address x is going to be in kseg0 or kseg1. We can
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 6cfdcf55572d..121008c0fcc9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
srcu_idx = srcu_read_lock(&kvm->srcu);
pfn = gfn_to_pfn(kvm, gfn);
- if (is_error_pfn(pfn)) {
+ if (is_error_noslot_pfn(pfn)) {
kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
err = -EFAULT;
goto out;
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index c0ae62520d15..274d5bc6ecce 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -97,10 +97,10 @@
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
-#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
-#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 5adc339eb7c8..0c2a94a0f751 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
-extern int update_cr16_clocksource(void); /* from time.c */
-
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
}
#endif
- /* If we've registered more than one cpu,
- * we'll use the jiffies clocksource since cr16
- * is not synchronized between CPUs.
- */
- update_cr16_clocksource();
-
return 0;
}
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 505cf1ac5af2..4b0b963d52a7 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-int update_cr16_clocksource(void)
-{
- /* since the cr16 cycle counters are not synchronized across CPUs,
- we'll check if we should switch to a safe clocksource: */
- if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
- clocksource_change_rating(&clocksource_cr16, 0);
- return 1;
- }
-
- return 0;
-}
-
void __init start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index f86a4eef28a9..28c4f96a2d9c 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -21,16 +21,21 @@ ENTRY(startup_continue)
lg %r15,.Lstack-.LPG1(%r13)
aghi %r15,-160
brasl %r14,decompress_kernel
- # setup registers for memory mover & branch to target
+ # Set up registers for memory mover. We move the decompressed image to
+ # 0x11000, starting at offset 0x11000 in the decompressed image so
+ # that code living at 0x11000 in the image will end up at 0x11000 in
+ # memory.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
la %r4,0(%r2,%r4)
lg %r3,.Lmvsize-.LPG1(%r13)
lgr %r5,%r3
- # move the memory mover someplace safe
+ # Move the memory mover someplace safe so it doesn't overwrite itself.
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
- # decompress image is started at 0x11000
+ # When the memory mover is done we pass control to
+ # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+ # the decompressed image.
lgr %r6,%r2
br %r1
mover:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 889ea3450210..26e0c7f08814 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1bcfd764910a..24879dab47bc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13ff090139c8..a5c1e5f2a0ca 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 577ae1d4ae89..2bad9d837029 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
struct kernel_fpu vxstate; \
unsigned long prealign, aligned, remaining; \
\
+ if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
+ return ___crc32_sw(crc, data, datalen); \
+ \
if ((unsigned long)data & VX_ALIGN_MASK) { \
prealign = VX_ALIGNMENT - \
((unsigned long)data & VX_ALIGN_MASK); \
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
data = (void *)((unsigned long)data + prealign); \
} \
\
- if (datalen < VX_MIN_LEN) \
- return ___crc32_sw(crc, data, datalen); \
- \
aligned = datalen & ~VX_ALIGN_MASK; \
remaining = datalen & VX_ALIGN_MASK; \
\
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ccccebeeaaf6..73610f2e3b4f 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 56e4d8234ef2..4431905f8cfa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -309,7 +309,9 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
- /* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
jg startup_continue
.Lstack:
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ba5f456edaa9..7f7ba5f23f13 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -204,11 +204,9 @@ static void __init conmode_default(void)
#endif
}
} else if (MACHINE_IS_KVM) {
- if (sclp.has_vt220 &&
- config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+ if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
SET_CONSOLE_VT220;
- else if (sclp.has_linemode &&
- config_enabled(CONFIG_SCLP_CONSOLE))
+ else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
SET_CONSOLE_SCLP;
else
SET_CONSOLE_HVC;
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index e390bbb16443..48352bffbc92 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -237,11 +237,10 @@ char * strrchr(const char * s, int c)
EXPORT_SYMBOL(strrchr);
static inline int clcle(const char *s1, unsigned long l1,
- const char *s2, unsigned long l2,
- int *diff)
+ const char *s2, unsigned long l2)
{
register unsigned long r2 asm("2") = (unsigned long) s1;
- register unsigned long r3 asm("3") = (unsigned long) l2;
+ register unsigned long r3 asm("3") = (unsigned long) l1;
register unsigned long r4 asm("4") = (unsigned long) s2;
register unsigned long r5 asm("5") = (unsigned long) l2;
int cc;
@@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1,
" srl %0,28"
: "=&d" (cc), "+a" (r2), "+a" (r3),
"+a" (r4), "+a" (r5) : : "cc");
- *diff = *(char *)r2 - *(char *)r4;
return cc;
}
@@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2)
return (char *) s1;
l1 = __strend(s1) - s1;
while (l1-- >= l2) {
- int cc, dummy;
+ int cc;
- cc = clcle(s1, l1, s2, l2, &dummy);
+ cc = clcle(s1, l2, s2, l2);
if (!cc)
return (char *) s1;
s1++;
@@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr);
*/
int memcmp(const void *cs, const void *ct, size_t n)
{
- int ret, diff;
+ int ret;
- ret = clcle(cs, n, ct, n, &diff);
+ ret = clcle(cs, n, ct, n);
if (ret)
- ret = diff;
+ ret = ret == 1 ? -1 : 1;
return ret;
}
EXPORT_SYMBOL(memcmp);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 7104ffb5a67f..af7cf28cf97e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end,
int rc = -EINVAL;
pgd_t *pgdp;
+ if (addr == end)
+ return 0;
if (end >= MODULES_END)
return -EINVAL;
mutex_lock(&cpa_mutex);
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 1dd5bd8a8c59..133055311dce 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -81,7 +81,7 @@
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
+ .exit.text : { EXIT_TEXT }
.exit.data : { *(.exit.data) }
.preinit_array : {
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 89fa85e8b10c..6f97fb33ae21 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index b691da981cd9..a78a0694ddef 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
+ vmovd _args_digest(state , idx, 4) , %xmm0
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
+ vmovdqu %xmm0, _result_digest(job_rax)
+ offset = (_result_digest + 1*16)
+ vmovdqu %xmm1, offset(job_rax)
pop %rbx
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index f4cf5b78fd36..d210174a52b0 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index cea4fc19e844..50c95af0f017 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
unsigned long flags;
int ret, ir_stat;
+ if (skip_ioapic_setup)
+ return;
+
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 27a0228c9cae..b816971f5da4 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -355,6 +355,7 @@ void load_ucode_amd_ap(void)
unsigned int cpu = smp_processor_id();
struct equiv_cpu_entry *eq;
struct microcode_amd *mc;
+ u8 *cont = container;
u32 rev, eax;
u16 eq_id;
@@ -371,8 +372,11 @@ void load_ucode_amd_ap(void)
if (check_current_patch_level(&rev, false))
return;
+ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+ cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
eax = cpuid_eax(0x00000001);
- eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+ eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
eq_id = find_equiv_id(eq, eax);
if (!eq_id)
@@ -434,6 +438,9 @@ int __init save_microcode_in_initrd_amd(void)
else
container = cont_va;
+ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+ container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2a6e84a30a54..4296beb8fdd3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Logical package management. We might want to allocate that dynamically */
static int *physical_to_logical_pkg __read_mostly;
static unsigned long *physical_package_map __read_mostly;;
-static unsigned long *logical_package_map __read_mostly;
static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
+static unsigned int logical_packages __read_mostly;
+static bool logical_packages_frozen __read_mostly;
/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
if (test_and_set_bit(pkg, physical_package_map))
goto found;
- new = find_first_zero_bit(logical_package_map, __max_logical_packages);
- if (new >= __max_logical_packages) {
+ if (logical_packages_frozen) {
physical_to_logical_pkg[pkg] = -1;
- pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+ pr_warn("APIC(%x) Package %u exceeds logical package max\n",
apicid, pkg);
return -ENOSPC;
}
- set_bit(new, logical_package_map);
+
+ new = logical_packages++;
pr_info("APIC(%x) Converting physical %u to logical package %u\n",
apicid, pkg, new);
physical_to_logical_pkg[pkg] = new;
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
}
__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+ logical_packages = 0;
/*
* Possibly larger than what we need as the number of apic ids per
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
memset(physical_to_logical_pkg, 0xff, size);
size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
physical_package_map = kzalloc(size, GFP_KERNEL);
- size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
- logical_package_map = kzalloc(size, GFP_KERNEL);
-
- pr_info("Max logical packages: %u\n", __max_logical_packages);
for_each_present_cpu(cpu) {
unsigned int apicid = apic->cpu_present_to_apicid(cpu);
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
set_cpu_possible(cpu, false);
set_cpu_present(cpu, false);
}
+
+ if (logical_packages > __max_logical_packages) {
+ pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
+ logical_packages, __max_logical_packages);
+ logical_packages_frozen = true;
+ __max_logical_packages = logical_packages;
+ }
+
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
}
void __init smp_store_boot_cpu_info(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a45d8580f91e..5cede40e2552 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -422,6 +422,7 @@ struct nested_vmx {
struct list_head vmcs02_pool;
int vmcs02_num;
u64 vmcs01_tsc_offset;
+ bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*
@@ -435,6 +436,8 @@ struct nested_vmx {
bool pi_pending;
u16 posted_intr_nv;
+ unsigned long *msr_bitmap;
+
struct hrtimer preemption_timer;
bool preemption_timer_expired;
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
new.control) != old.control);
}
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+ vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Setup TSC multiplier */
if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
- vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
- }
+ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+ decache_tsc_multiplier(vmx);
vmx_vcpu_pi_load(vcpu, cpu);
vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
unsigned long *msr_bitmap;
if (is_guest_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_nested;
+ msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
else if (cpu_has_secondary_exec_ctrls() &&
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
if (!vmx_msr_bitmap_longmode_x2apic)
goto out4;
- if (nested) {
- vmx_msr_bitmap_nested =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_nested)
- goto out5;
- }
-
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- if (nested)
- memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
out7:
free_page((unsigned long)vmx_vmread_bitmap);
out6:
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
+ if (cpu_has_vmx_msr_bitmap()) {
+ vmx->nested.msr_bitmap =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx->nested.msr_bitmap)
+ goto out_msr_bitmap;
+ }
+
vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12)
- return -ENOMEM;
+ goto out_cached_vmcs12;
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs) {
- kfree(vmx->nested.cached_vmcs12);
- return -ENOMEM;
- }
+ if (!shadow_vmcs)
+ goto out_shadow_vmcs;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
+
+out_shadow_vmcs:
+ kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+ free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+ return -ENOMEM;
}
/*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
+ if (vmx->nested.msr_bitmap) {
+ free_page((unsigned long)vmx->nested.msr_bitmap);
+ vmx->nested.msr_bitmap = NULL;
+ }
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
u32 sec_exec_control;
+ /* Postpone execution until vmcs01 is the current VMCS. */
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+ return;
+ }
+
/*
* There is not point to enable virtualize x2apic without enable
* apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
{
int msr;
struct page *page;
- unsigned long *msr_bitmap;
+ unsigned long *msr_bitmap_l1;
+ unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
+ /* This shortcut is ok because we support only x2APIC MSRs so far. */
if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false;
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
WARN_ON(1);
return false;
}
- msr_bitmap = (unsigned long *)kmap(page);
- if (!msr_bitmap) {
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+ if (!msr_bitmap_l1) {
nested_release_page_clean(page);
WARN_ON(1);
return false;
}
+ memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
msr, MSR_TYPE_R);
- /* TPR is allowed */
- nested_vmx_disable_intercept_for_msr(msr_bitmap,
- vmx_msr_bitmap_nested,
+
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);
+
if (nested_cpu_has_vid(vmcs12)) {
- /* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
- } else {
- /*
- * Enable reading intercept of all the x2apic
- * MSRs. We should not rely on vmcs12 to do any
- * optimizations here, it may have been modified
- * by L1.
- */
- for (msr = 0x800; msr <= 0x8ff; msr++)
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- msr,
- MSR_TYPE_R);
-
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_TASKPRI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_EOI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
- MSR_TYPE_W);
}
kunmap(page);
nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
if (cpu_has_vmx_msr_bitmap() &&
- exec_control & CPU_BASED_USE_MSR_BITMAPS) {
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
- /* MSR_BITMAP will be set by following vmx_set_efer. */
- } else
+ exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+ ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+ else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
/*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
if (enable_vpid) {
/*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
else
vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+ if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+ vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+ vmx_set_virtual_x2apic_mode(vcpu,
+ vcpu->arch.apic_base & X2APIC_ENABLE);
+ }
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ec8654f117d8..bda8d5eef04d 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
*/
static inline bool kaslr_memory_enabled(void)
{
- return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+ return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}
/* Initialize base and padding for each memory region randomized with KASLR */
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
index b814ca675131..7948be342ee9 100644
--- a/arch/x86/pci/vmd.c
+++ b/arch/x86/pci/vmd.c
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
* @node: list item for parent traversal.
* @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
+ * @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
*
* Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
struct list_head node;
struct rcu_head rcu;
struct vmd_irq_list *irq;
+ bool enabled;
unsigned int virq;
};
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
unsigned long flags;
raw_spin_lock_irqsave(&list_lock, flags);
+ WARN_ON(vmdirq->enabled);
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
raw_spin_unlock_irqrestore(&list_lock, flags);
data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
data->chip->irq_mask(data);
raw_spin_lock_irqsave(&list_lock, flags);
- list_del_rcu(&vmdirq->node);
- INIT_LIST_HEAD_RCU(&vmdirq->node);
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
raw_spin_unlock_irqrestore(&list_lock, flags);
}
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a3e3ccc87138..9634557a5444 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
return result;
}
- temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
+ temp_level4_pgt = __pa(pgd);
return 0;
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8ffb089b19a5..b86ebb1a9a7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
enum xen_domain_type xen_domain_type = XEN_NATIVE;