Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/exynos5420-arndale-octa.dts    2
-rw-r--r--  arch/arm/boot/dts/imx50.dtsi                      2
-rw-r--r--  arch/arm/boot/dts/imx51.dtsi                      2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi                      2
-rw-r--r--  arch/arm/boot/dts/imx6qdl.dtsi                    2
-rw-r--r--  arch/arm/boot/dts/imx6sl.dtsi                     2
-rw-r--r--  arch/arm/boot/dts/imx6sx.dtsi                     2
-rw-r--r--  arch/arm/boot/dts/imx6ul.dtsi                     2
-rw-r--r--  arch/arm/boot/dts/imx7s.dtsi                      4
-rw-r--r--  arch/arm/include/asm/hardirq.h                    1
-rw-r--r--  arch/arm/kernel/smp.c                             6
-rw-r--r--  arch/arm/mach-exynos/suspend.c                   19
-rw-r--r--  arch/arm/mach-omap2/pm33xx-core.c                 8
-rw-r--r--  arch/arm64/mm/mmu.c                              11
-rw-r--r--  arch/ia64/mm/numa.c                               1
-rw-r--r--  arch/mips/kernel/prom.c                          14
-rw-r--r--  arch/mips/mm/mmap.c                               2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h               1
-rw-r--r--  arch/powerpc/kvm/book3s.c                         1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                      9
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c                   14
-rw-r--r--  arch/powerpc/platforms/powernv/opal-imc.c         4
-rw-r--r--  arch/s390/include/asm/uaccess.h                   2
-rw-r--r--  arch/s390/kvm/kvm-s390.c                         35
-rw-r--r--  arch/um/kernel/time.c                             2
-rw-r--r--  arch/x86/events/intel/ds.c                       28
-rw-r--r--  arch/x86/kernel/cpu/amd.c                         7
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_monitor.c           3
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c             13
-rw-r--r--  arch/x86/kernel/ftrace.c                         42
-rw-r--r--  arch/x86/mm/kasan_init_64.c                       2
-rw-r--r--  arch/x86/mm/kaslr.c                              11
-rw-r--r--  arch/x86/pci/irq.c                               10
33 files changed, 168 insertions(+), 98 deletions(-)
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index cdda614e417e..a370857beac0 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -106,6 +106,7 @@
regulator-name = "PVDD_APIO_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo3_reg: LDO3 {
@@ -144,6 +145,7 @@
regulator-name = "PVDD_ABB_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo9_reg: LDO9 {
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index a9b712db9f6c..d7553f825483 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -438,7 +438,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index fe01b890c715..2fe42deeb2be 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -467,7 +467,7 @@
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 1a7a7bb3df45..2c67501a6c98 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -696,7 +696,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 911141e24681..e319fe6c7609 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -905,7 +905,7 @@
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_SDMA>,
+ clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 994e48dc1df0..31e0e085ac1c 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -712,7 +712,7 @@
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_SDMA>,
- <&clks IMX6SL_CLK_SDMA>;
+ <&clks IMX6SL_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
/* imx6sl reuses imx6q sdma firmware */
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 71bda604c435..7b5263ebc3dc 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -796,7 +796,7 @@
compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SX_CLK_SDMA>,
+ clocks = <&clks IMX6SX_CLK_IPG>,
<&clks IMX6SX_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 47a3453a4211..91c96b068590 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -701,7 +701,7 @@
"fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6UL_CLK_SDMA>,
+ clocks = <&clks IMX6UL_CLK_IPG>,
<&clks IMX6UL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 9ced589bfa96..605b550ffe11 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -1051,8 +1051,8 @@
compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_SDMA_CORE_CLK>,
- <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
+ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+ <&clks IMX7D_SDMA_CORE_CLK>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index cba23eaa6072..7a88f160b1fb 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -6,6 +6,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
+/* number of IPIs _not_ including IPI_CPU_BACKTRACE */
#define NR_IPI 7
typedef struct {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a3ce7c5365fa..bada66ef4419 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -76,6 +76,10 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ /*
+ * CPU_BACKTRACE is special and not included in NR_IPI
+ * or traceable with trace_ipi_*
+ */
IPI_CPU_BACKTRACE,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
@@ -803,7 +807,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index b15cdfdb8e1c..c7f58d08d76f 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -434,8 +434,27 @@ early_wakeup:
static void exynos5420_prepare_pm_resume(void)
{
+ unsigned int mpidr, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
WARN_ON(mcpm_cpu_powered_up());
+
+ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
+ /*
+ * When system is resumed on the LITTLE/KFC core (cluster 1),
+ * the DSCR is not properly updated until the power is turned
+ * on also for the cluster 0. Enable it for a while to
+ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
+ * block and avoid undefined instruction issue on CP14 reset.
+ */
+ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ pmu_raw_writel(0,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ }
}
static void exynos5420_pm_resume(void)
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 9b3755a2e2ec..8130736074b0 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -50,10 +50,12 @@ static int amx3_common_init(void)
/* CEFUSE domain can be turned off post bootup */
cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
- if (cefuse_pwrdm)
- omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
- else
+ if (!cefuse_pwrdm)
pr_err("PM: Failed to get cefuse_pwrdm\n");
+ else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+ pr_info("PM: Leaving EFUSE power domain active\n");
+ else
+ omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
return 0;
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8ae5d7ae4af3..a9df74d01400 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -920,13 +920,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
int __init arch_ioremap_pud_supported(void)
{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+ /*
+ * Only 4k granule supports level 1 block mappings.
+ * SW table walks can't handle removal of intermediate entries.
+ */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int __init arch_ioremap_pmd_supported(void)
{
- return 1;
+ /* See arch_ioremap_pud_supported() */
+ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index aa19b7ac8222..476c7b4be378 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
+EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 89950b7bf536..bdaf3536241a 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -41,7 +41,19 @@ char *mips_get_machine_name(void)
#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- return add_memory_region(base, size, BOOT_MEM_RAM);
+ if (base >= PHYS_ADDR_MAX) {
+ pr_warn("Trying to add an invalid memory region, skipped\n");
+ return;
+ }
+
+ /* Truncate the passed memory region instead of type casting */
+ if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
+ pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
+ size, base, PHYS_ADDR_MAX - base);
+ size = PHYS_ADDR_MAX - base;
+ }
+
+ add_memory_region(base, size, BOOT_MEM_RAM);
}
int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7755a1fad05a..1b705fb2f10c 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
int __virt_addr_valid(const volatile void *kaddr)
{
- unsigned long vaddr = (unsigned long)vaddr;
+ unsigned long vaddr = (unsigned long)kaddr;
if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
return 0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 68f69854095b..5a5c207f7ad2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -292,6 +292,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
+ struct mutex rtas_token_lock;
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index edaf4720d156..5b95ef13abd6 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -841,6 +841,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+ mutex_init(&kvm->arch.rtas_token_lock);
#endif
return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2ebd5132a29f..85f63a0c0fb1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -427,12 +427,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
- struct kvm_vcpu *ret;
-
- mutex_lock(&kvm->lock);
- ret = kvm_get_vcpu_by_id(kvm, id);
- mutex_unlock(&kvm->lock);
- return ret;
+ return kvm_get_vcpu_by_id(kvm, id);
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1310,7 +1305,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
- mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
@@ -1350,7 +1344,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
- mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 2d3b2b1cc272..8f2355138f80 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
if (rtas_name_matches(d->handler->name, name)) {
@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
bool found;
int i;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
if (d->token == token)
@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.rtas_token_lock);
if (args.token)
rc = rtas_token_define(kvm, args.name, args.token);
else
rc = rtas_token_undefine(kvm, args.name);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.rtas_token_lock);
return rc;
}
@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
orig_rets = args.rets;
args.rets = &args.args[be32_to_cpu(args.nargs)];
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
}
}
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
if (rc == 0) {
args.rets = orig_rets;
@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
-
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
list_del(&d->list);
kfree(d);
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 3d27f02695e4..828f6656f8f7 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
struct imc_pmu *pmu_ptr;
u32 offset;
+ /* Return for unknown domain */
+ if (domain < 0)
+ return -EINVAL;
+
/* memory for pmu */
pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
if (!pmu_ptr)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index ad6b91013a05..5332f628c1ed 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
+#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3b7a5151b6a5..4379107a5866 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4041,21 +4041,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- int rc;
-
- /* If the basics of the memslot do not change, we do not want
- * to update the gmap. Every update causes several unnecessary
- * segment translation exceptions. This is usually handled just
- * fine by the normal fault handler + gmap, but it will also
- * cause faults on the prefix page of running guest CPUs.
- */
- if (old->userspace_addr == mem->userspace_addr &&
- old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
- old->npages * PAGE_SIZE == mem->memory_size)
- return;
+ int rc = 0;
- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
- mem->guest_phys_addr, mem->memory_size);
+ switch (change) {
+ case KVM_MR_DELETE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ break;
+ case KVM_MR_MOVE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ if (rc)
+ break;
+ /* FALLTHROUGH */
+ case KVM_MR_CREATE:
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ break;
+ case KVM_MR_FLAGS_ONLY:
+ break;
+ default:
+ WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+ }
if (rc)
pr_warn("failed to commit memory region\n");
return;
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 052de4c8acb2..0c572a48158e 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
static struct clock_event_device timer_clockevent = {
.name = "posix-timer",
.rating = 250,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = itimer_shutdown,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8dbba77e0518..7d8c3ff735c2 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};
@@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
struct event_constraint intel_slm_pebs_event_constraints[] = {
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@@ -732,7 +732,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -749,7 +749,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -758,7 +758,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -773,9 +773,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -789,9 +789,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -812,9 +812,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -835,9 +835,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
struct event_constraint intel_skl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 909a474e74b1..6adf09779fa0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -813,8 +813,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
- if (!cpu_has(c, X86_FEATURE_CPB))
+ /*
+ * Fix erratum 1076: CPB feature bit not being set in CPUID.
+ * Always set it, except when running under a hypervisor.
+ */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index b0f3aed76b75..3d4ec80a6bb9 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
struct list_head *head;
struct rdtgroup *entry;
+ if (!is_mbm_local_enabled())
+ return;
+
r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
closid = rgrp->closid;
rmid = rgrp->mon.rmid;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b43ddefd77f4..a96091d44a45 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -790,13 +790,16 @@ static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume,
};
-static int mc_cpu_online(unsigned int cpu)
+static int mc_cpu_starting(unsigned int cpu)
{
- struct device *dev;
-
- dev = get_cpu_device(cpu);
microcode_update_cpu(cpu);
pr_debug("CPU%d added\n", cpu);
+ return 0;
+}
+
+static int mc_cpu_online(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
@@ -873,6 +876,8 @@ int __init microcode_init(void)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
+ cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
+ mc_cpu_starting, NULL);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9f033dfd2766..becb075954aa 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -50,7 +50,7 @@ int ftrace_arch_code_modify_post_process(void)
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
struct {
- unsigned char e8;
+ unsigned char op;
int offset;
} __attribute__((packed));
};
@@ -60,20 +60,23 @@ static int ftrace_calc_offset(long ip, long addr)
return (int)(addr - ip);
}
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *
+ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;
- calc.e8 = 0xe8;
+ calc.op = op;
calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- /*
- * No locking needed, this must be called via kstop_machine
- * which in essence is like running on a uniprocessor machine.
- */
return calc.code;
}
+static unsigned char *
+ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ return ftrace_text_replace(0xe8, ip, addr);
+}
+
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
@@ -683,22 +686,6 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
-{
- static union ftrace_code_union calc;
-
- /* Jmp not a call (ignore the .e8) */
- calc.e8 = 0xe9;
- calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-
- /*
- * ftrace external locks synchronize the access to the static variable.
- */
- return calc.code;
-}
-#endif
-
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64
@@ -920,8 +907,8 @@ static void *addr_from_call(void *ptr)
return NULL;
/* Make sure this is a call */
- if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
- pr_warn("Expected e8, got %x\n", calc.e8);
+ if (WARN_ON_ONCE(calc.op != 0xe8)) {
+ pr_warn("Expected e8, got %x\n", calc.op);
return NULL;
}
@@ -992,6 +979,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+{
+ return ftrace_text_replace(0xe9, ip, addr);
+}
+
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e3e77527f8df..4bfd14d5da8e 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;
- p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+ p4d = pgd_val(*pgd) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 0988971069c9..bfe769209eae 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region {
} kaslr_regions[] = {
{ &page_offset_base, 0 },
{ &vmalloc_base, 0 },
- { &vmemmap_base, 1 },
+ { &vmemmap_base, 0 },
};
/* Get size in bytes used by the memory region */
@@ -77,6 +77,7 @@ void __init kernel_randomize_memory(void)
unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
+ unsigned long vmemmap_size;
vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void)
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;
+ /*
+ * Calculate the vmemmap region size in TBs, aligned to a TB
+ * boundary.
+ */
+ vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+ sizeof(struct page);
+ kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 52e55108404e..d3a73f9335e1 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
void __init pcibios_irq_init(void)
{
+ struct irq_routing_table *rtable = NULL;
+
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
+ rtable = pirq_table;
+ }
#endif
if (pirq_table) {
pirq_peer_trick();
@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
- if (io_apic_assign_pci_irqs)
+ if (io_apic_assign_pci_irqs) {
+ kfree(rtable);
pirq_table = NULL;
+ }
}
x86_init.pci.fixup_irqs();