Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/kernel/pci.c | 8
-rw-r--r-- arch/alpha/mm/fault.c | 2
-rw-r--r-- arch/arc/mm/fault.c | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9263.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/berlin2q-marvell-dmp.dts | 2
-rw-r--r-- arch/arm/boot/dts/berlin2q.dtsi | 63
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 10
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/exynos5250.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/exynos5420-arndale-octa.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos5420.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx25.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/imx51-babbage.dts | 22
-rw-r--r-- arch/arm/boot/dts/imx6qdl.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6sx-sdb.dts | 15
-rw-r--r-- arch/arm/boot/dts/ls1021a.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3288-evb.dtsi | 30
-rw-r--r-- arch/arm/boot/dts/sama5d3xmb.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d4.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-nhk15.dts | 8
-rw-r--r-- arch/arm/boot/dts/tegra20-seaboard.dts | 2
-rw-r--r-- arch/arm/boot/dts/vf610-twr.dts | 15
-rw-r--r-- arch/arm/configs/exynos_defconfig | 18
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 2
-rw-r--r-- arch/arm/include/asm/kvm_emulate.h | 10
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 3
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 77
-rw-r--r-- arch/arm/kernel/entry-header.S | 13
-rw-r--r-- arch/arm/kernel/perf_event.c | 10
-rw-r--r-- arch/arm/kernel/setup.c | 7
-rw-r--r-- arch/arm/kvm/arm.c | 10
-rw-r--r-- arch/arm/kvm/coproc.c | 70
-rw-r--r-- arch/arm/kvm/coproc.h | 6
-rw-r--r-- arch/arm/kvm/coproc_a15.c | 2
-rw-r--r-- arch/arm/kvm/coproc_a7.c | 2
-rw-r--r-- arch/arm/kvm/mmu.c | 164
-rw-r--r-- arch/arm/kvm/trace.h | 39
-rw-r--r-- arch/arm/mach-at91/board-dt-sama5.c | 18
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 2
-rw-r--r-- arch/arm/mach-imx/clk-imx6sx.c | 3
-rw-r--r-- arch/arm/mach-mvebu/coherency.c | 7
-rw-r--r-- arch/arm/mach-omap2/board-generic.c | 18
-rw-r--r-- arch/arm/mach-omap2/common.h | 2
-rw-r--r-- arch/arm/mach-omap2/control.h | 4
-rw-r--r-- arch/arm/mach-omap2/omap-headsmp.S | 21
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 13
-rw-r--r-- arch/arm/mach-omap2/omap4-common.c | 32
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 10
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 1
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 5
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 1
-rw-r--r-- arch/arm/mach-omap2/prcm-common.h | 1
-rw-r--r-- arch/arm/mach-omap2/prm44xx.c | 5
-rw-r--r-- arch/arm/mach-omap2/prm_common.c | 14
-rw-r--r-- arch/arm/mach-omap2/timer.c | 44
-rw-r--r-- arch/arm/mach-omap2/twl-common.c | 7
-rw-r--r-- arch/arm/mach-rockchip/rockchip.c | 27
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7740.c | 7
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7778.c | 9
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7779.c | 9
-rw-r--r-- arch/arm/mach-shmobile/setup-sh73a0.c | 3
-rw-r--r-- arch/arm64/Makefile | 1
-rw-r--r-- arch/arm64/boot/dts/Makefile | 2
-rw-r--r-- arch/arm64/boot/dts/arm/juno.dts | 2
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 12
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 34
-rw-r--r-- arch/arm64/include/asm/unistd.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 2
-rw-r--r-- arch/arm64/kvm/hyp.S | 1
-rw-r--r-- arch/arm64/kvm/reset.c | 1
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 75
-rw-r--r-- arch/arm64/mm/dump.c | 1
-rw-r--r-- arch/arm64/mm/init.c | 8
-rw-r--r-- arch/avr32/kernel/module.c | 13
-rw-r--r-- arch/avr32/mm/fault.c | 2
-rw-r--r-- arch/cris/arch-v32/drivers/sync_serial.c | 2
-rw-r--r-- arch/cris/kernel/module.c | 2
-rw-r--r-- arch/cris/mm/fault.c | 2
-rw-r--r-- arch/frv/mb93090-mb00/pci-frv.c | 2
-rw-r--r-- arch/frv/mm/fault.c | 2
-rw-r--r-- arch/ia64/kernel/module.c | 6
-rw-r--r-- arch/ia64/mm/fault.c | 2
-rw-r--r-- arch/ia64/pci/pci.c | 48
-rw-r--r-- arch/m32r/mm/fault.c | 2
-rw-r--r-- arch/m68k/include/asm/unistd.h | 2
-rw-r--r-- arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 1
-rw-r--r-- arch/m68k/mm/fault.c | 2
-rw-r--r-- arch/metag/mm/fault.c | 2
-rw-r--r-- arch/microblaze/mm/fault.c | 2
-rw-r--r-- arch/microblaze/pci/pci-common.c | 13
-rw-r--r-- arch/mips/mm/fault.c | 2
-rw-r--r-- arch/mips/net/bpf_jit.c | 2
-rw-r--r-- arch/mn10300/mm/fault.c | 2
-rw-r--r-- arch/mn10300/unit-asb2305/pci-asb2305.c | 2
-rw-r--r-- arch/mn10300/unit-asb2305/pci.c | 47
-rw-r--r-- arch/nios2/kernel/module.c | 2
-rw-r--r-- arch/nios2/kernel/signal.c | 2
-rw-r--r-- arch/nios2/mm/fault.c | 2
-rw-r--r-- arch/openrisc/mm/fault.c | 2
-rw-r--r-- arch/parisc/kernel/module.c | 6
-rw-r--r-- arch/parisc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/crypto/sha1.c | 1
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 13
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 12
-rw-r--r-- arch/powerpc/mm/copro_fault.c | 2
-rw-r--r-- arch/powerpc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 1
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 2
-rw-r--r-- arch/powerpc/xmon/xmon.c | 1
-rw-r--r-- arch/s390/hypfs/hypfs_vm.c | 2
-rw-r--r-- arch/s390/include/asm/irqflags.h | 2
-rw-r--r-- arch/s390/include/asm/timex.h | 10
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/s390/kernel/module.c | 10
-rw-r--r-- arch/s390/kernel/syscalls.S | 1
-rw-r--r-- arch/s390/kernel/uprobes.c | 69
-rw-r--r-- arch/s390/kernel/vtime.c | 2
-rw-r--r-- arch/s390/mm/fault.c | 6
-rw-r--r-- arch/s390/mm/pgtable.c | 5
-rw-r--r-- arch/s390/net/bpf_jit.S | 28
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 17
-rw-r--r-- arch/score/mm/fault.c | 2
-rw-r--r-- arch/sh/mm/fault.c | 2
-rw-r--r-- arch/sparc/kernel/pci.c | 5
-rw-r--r-- arch/sparc/mm/fault_32.c | 2
-rw-r--r-- arch/sparc/mm/fault_64.c | 2
-rw-r--r-- arch/sparc/net/bpf_jit_comp.c | 4
-rw-r--r-- arch/tile/kernel/module.c | 4
-rw-r--r-- arch/tile/mm/fault.c | 2
-rw-r--r-- arch/um/kernel/trap.c | 2
-rw-r--r-- arch/x86/Kconfig | 6
-rw-r--r-- arch/x86/boot/compressed/Makefile | 2
-rw-r--r-- arch/x86/boot/compressed/misc.c | 9
-rw-r--r-- arch/x86/crypto/sha-mb/sha1_mb.c | 2
-rw-r--r-- arch/x86/include/asm/acpi.h | 1
-rw-r--r-- arch/x86/include/asm/desc.h | 20
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 20
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 26
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 1
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 4
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_rapl.c | 46
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 9
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 18
-rw-r--r-- arch/x86/kernel/ftrace.c | 2
-rw-r--r-- arch/x86/kernel/irq.c | 2
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 20
-rw-r--r-- arch/x86/kernel/tls.c | 25
-rw-r--r-- arch/x86/kernel/tsc.c | 2
-rw-r--r-- arch/x86/kvm/emulate.c | 31
-rw-r--r-- arch/x86/kvm/lapic.c | 3
-rw-r--r-- arch/x86/mm/fault.c | 2
-rw-r--r-- arch/x86/mm/init.c | 4
-rw-r--r-- arch/x86/mm/mpx.c | 6
-rw-r--r-- arch/x86/mm/pat.c | 7
-rw-r--r-- arch/x86/pci/i386.c | 2
-rw-r--r-- arch/x86/pci/xen.c | 49
-rw-r--r-- arch/x86/tools/calc_run_size.pl | 39
-rw-r--r-- arch/x86/tools/calc_run_size.sh | 42
-rw-r--r-- arch/x86/xen/enlighten.c | 22
-rw-r--r-- arch/x86/xen/p2m.c | 20
-rw-r--r-- arch/x86/xen/setup.c | 42
-rw-r--r-- arch/x86/xen/time.c | 18
-rw-r--r-- arch/xtensa/mm/fault.c | 2
168 files changed, 1295 insertions(+), 665 deletions(-)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 076c35cd6cde..98a1525fa164 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
if (r->parent || !r->start || !r->flags)
continue;
if (pci_has_flag(PCI_PROBE_ONLY) ||
- (r->flags & IORESOURCE_PCI_FIXED))
- pci_claim_resource(dev, i);
+ (r->flags & IORESOURCE_PCI_FIXED)) {
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
+ }
}
}
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1467750e3377..e8c6c600a5b6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -953,6 +953,8 @@
interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fb>;
+ clocks = <&lcd_clk>, <&lcd_clk>;
+ clock-names = "lcdc_clk", "hclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index 28e7e2060c33..a98ac1bd8f65 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -65,6 +65,8 @@
};
&sdhci2 {
+ broken-cd;
+ bus-width = <8>;
non-removable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 35253c947a7c..e2f61f27944e 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -83,7 +83,8 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&chip CLKID_SDIO1XIN>;
+ clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+ clock-names = "io", "core";
status = "disabled";
};
@@ -348,36 +349,6 @@
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
};
-
- gpio4: gpio@5000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x5000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- porte: gpio-port@4 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
-
- gpio5: gpio@c000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0xc000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- portf: gpio-port@5 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
};
chip: chip-control@ea0000 {
@@ -466,6 +437,21 @@
ranges = <0 0xfc0000 0x10000>;
interrupt-parent = <&sic>;
+ sm_gpio1: gpio@5000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x5000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ portf: gpio-port@5 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
i2c2: i2c@7000 {
compatible = "snps,designware-i2c";
#address-cells = <1>;
@@ -516,6 +502,21 @@
status = "disabled";
};
+ sm_gpio0: gpio@c000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xc000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ porte: gpio-port@4 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
sysctrl: pin-controller@d000 {
compatible = "marvell,berlin2q-system-ctrl";
reg = <0xd000 0x100>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 10b725c7bfc0..ad4118f7e1a6 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -499,23 +499,23 @@
};
partition@5 {
label = "QSPI.u-boot-spl-os";
- reg = <0x00140000 0x00010000>;
+ reg = <0x00140000 0x00080000>;
};
partition@6 {
label = "QSPI.u-boot-env";
- reg = <0x00150000 0x00010000>;
+ reg = <0x001c0000 0x00010000>;
};
partition@7 {
label = "QSPI.u-boot-env.backup1";
- reg = <0x00160000 0x0010000>;
+ reg = <0x001d0000 0x0010000>;
};
partition@8 {
label = "QSPI.kernel";
- reg = <0x00170000 0x0800000>;
+ reg = <0x001e0000 0x0800000>;
};
partition@9 {
label = "QSPI.file-system";
- reg = <0x00970000 0x01690000>;
+ reg = <0x009e0000 0x01620000>;
};
};
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 22771bc1643a..63f8b007bdc5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1257,6 +1257,8 @@
tx-fifo-resize;
maximum-speed = "super-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1278,6 +1280,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1299,6 +1303,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0a229fcd7acf..d75c89d7666a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -736,7 +736,7 @@
dp_phy: video-phy@10040720 {
compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040720 4>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index aa7a7d727a7e..db2c1c4cd900 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -372,3 +372,7 @@
&usbdrd_dwc3_1 {
dr_mode = "host";
};
+
+&cci {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 517e50f6760b..6d38f8bfd0e6 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -120,7 +120,7 @@
};
};
- cci@10d20000 {
+ cci: cci@10d20000 {
compatible = "arm,cci-400";
#address-cells = <1>;
#size-cells = <1>;
@@ -503,8 +503,8 @@
};
dp_phy: video-phy@10040728 {
- compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040728 4>;
+ compatible = "samsung,exynos5420-dp-video-phy";
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 58d3c3cf2923..e4d3aecc4ed2 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -162,7 +162,7 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>, <&clks 62>;
+ clocks = <&clks 78>, <&clks 78>;
clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
@@ -369,7 +369,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa0000 0x4000>;
- clocks = <&clks 106>, <&clks 36>;
+ clocks = <&clks 106>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <36>;
};
@@ -388,7 +388,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa8000 0x4000>;
- clocks = <&clks 107>, <&clks 36>;
+ clocks = <&clks 107>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <41>;
};
@@ -429,7 +429,7 @@
pwm4: pwm@53fc8000 {
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
reg = <0x53fc8000 0x4000>;
- clocks = <&clks 108>, <&clks 36>;
+ clocks = <&clks 108>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <42>;
};
@@ -476,7 +476,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fe0000 0x4000>;
- clocks = <&clks 105>, <&clks 36>;
+ clocks = <&clks 105>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <26>;
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 56569cecaa78..649befeb2cf9 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -127,24 +127,12 @@
#address-cells = <1>;
#size-cells = <0>;
- reg_usbh1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1reg>;
- reg = <0>;
- regulator-name = "usbh1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usbotg_vbus: regulator@1 {
+ reg_hub_reset: regulator@0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotgreg>;
- reg = <1>;
- regulator-name = "usbotg_vbus";
+ reg = <0>;
+ regulator-name = "hub_reset";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
@@ -176,6 +164,7 @@
reg = <0>;
clocks = <&clks IMX5_CLK_DUMMY>;
clock-names = "main_clk";
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
};
};
};
@@ -419,7 +408,7 @@
&usbh1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
- vbus-supply = <&reg_usbh1_vbus>;
+ vbus-supply = <&reg_hub_reset>;
fsl,usbphy = <&usbh1phy>;
phy_type = "ulpi";
status = "okay";
@@ -429,7 +418,6 @@
dr_mode = "otg";
disable-over-current;
phy_type = "utmi_wide";
- vbus-supply = <&reg_usbotg_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4fc03b7f1cee..2109d0763c1b 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -335,8 +335,8 @@
vpu: vpu@02040000 {
compatible = "cnm,coda960";
reg = <0x02040000 0x3c000>;
- interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
- <0 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "bit", "jpeg";
clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
<&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 657da14cb4b5..c70bb27ac65a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -142,6 +142,7 @@
scfg: scfg@1570000 {
compatible = "fsl,ls1021a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
+ big-endian;
};
clockgen: clocking@1ee1000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 53f3ca064140..b550c41b46f1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -700,11 +700,9 @@
};
};
+ /* Ethernet is on some early development boards and qemu */
ethernet@gpmc {
compatible = "smsc,lan91c94";
-
- status = "disabled";
-
interrupt-parent = <&gpio2>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */
reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 3e067dd65d0c..6194d673e80b 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -155,6 +155,15 @@
};
&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
backlight {
bl_en: bl-en {
rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -173,6 +182,27 @@
};
};
+ sdmmc {
+ /*
+ * Default drive strength isn't enough to achieve even
+ * high-speed mode on EVB board so bump up to 8ma.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+ };
+
usb {
host_vbus_drv: host-vbus-drv {
rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 49c10d33df30..77e03655aca3 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -176,7 +176,7 @@
"Headphone Jack", "HPOUTR",
"IN2L", "Line In Jack",
"IN2R", "Line In Jack",
- "MICBIAS", "IN1L",
+ "Mic", "MICBIAS",
"IN1L", "Mic";
atmel,ssc-controller = <&ssc0>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 1b0f30c2c4a5..b94995d1889f 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1008,7 +1008,7 @@
pit: timer@fc068630 {
compatible = "atmel,at91sam9260-pit";
- reg = <0xfc068630 0xf>;
+ reg = <0xfc068630 0x10>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
clocks = <&h32ck>;
};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index a8c00ee7522a..3d0b8755caee 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -25,11 +25,11 @@
stmpe2401_1 {
stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
nhk_cfg1 {
- ste,pins = "GPIO76_B20"; // IRQ line
+ pins = "GPIO76_B20"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO77_B8"; // reset line
+ pins = "GPIO77_B8"; // reset line
ste,output = <1>;
};
};
@@ -37,11 +37,11 @@
stmpe2401_2 {
stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
nhk_cfg1 {
- ste,pins = "GPIO78_A8"; // IRQ line
+ pins = "GPIO78_A8"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO79_C9"; // reset line
+ pins = "GPIO79_C9"; // reset line
ste,output = <1>;
};
};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index ea282c7c0ca5..e2fed2712249 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -406,7 +406,7 @@
clock-frequency = <400000>;
magnetometer@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0xc>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
&fec0 {
phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec0>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
&fec1 {
phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
status = "okay";
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 5ef14de00a29..3d0c5d65c741 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_SBS=y
CONFIG_CHARGER_TPS65090=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_LM90=y
CONFIG_THERMAL=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
+CONFIG_DRM=y
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PTN3460=y
+CONFIG_DRM_PS8622=y
+CONFIG_DRM_EXYNOS=y
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_DP=y
+CONFIG_DRM_PANEL=y
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_SIMPLE=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_7x14=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c2c3a852af9f..667d9d52aa01 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_BINFMT_MISC=y
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..7b0152321b20 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
}
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr = hcr;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..04b4ea0b550a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
-
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
*
* VIVT caches are tagged using both the ASID and the VMID and doesn't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+ * solution). For that, we need to kmap one page at a time,
+ * and iterate over the range.
*/
- if (icache_is_pipt()) {
- __cpuc_coherent_user_range(hva, hva + size);
- } else if (!icache_is_vivt_asid_tagged()) {
+
+ bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+ VM_BUG_ON(size & PAGE_MASK);
+
+ if (!need_flush && !icache_is_pipt())
+ goto vipt_cache;
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ if (need_flush)
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ if (icache_is_pipt())
+ __cpuc_coherent_user_range((unsigned long)va,
+ (unsigned long)va + PAGE_SIZE);
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
+
+vipt_cache:
+ if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ void *va = kmap_atomic(pte_page(pte));
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ unsigned long size = PMD_SIZE;
+ pfn_t pfn = pmd_pfn(pmd);
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ pfn++;
+ size -= PAGE_SIZE;
+
+ kunmap_atomic(va);
+ }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf0..1a0045abead7 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
.endm
.macro restore_user_regs, fast = 0, offset = 0
- ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
- ldr lr, [sp, #\offset + S_PC]! @ get pc
+ mov r2, sp
+ ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [r2, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r1, r2, [sp] @ clear the exclusive monitor
+ strex r1, r2, [r2] @ clear the exclusive monitor
#endif
.if \fast
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ ldmdb r2, {r1 - lr}^ @ get calling r1 - lr
.else
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ ldmdb r2, {r0 - lr}^ @ get calling r0 - lr
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
- add sp, sp, #S_FRAME_SIZE - S_PC
+ add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
.endm
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..557e128e4df0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
ret = 1;
}
- if (left > (s64)armpmu->max_period)
- left = armpmu->max_period;
+ /*
+ * Limit the maximum period to prevent the counter value
+ * from overtaking the one we are about to program. In
+ * effect we are reducing max_period to account for
+ * interrupt latency (and we are being very conservative).
+ */
+ if (left > (armpmu->max_period >> 1))
+ left = armpmu->max_period >> 1;
local64_set(&hwc->prev_count, (u64)-left);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 715ae19bc7c8..e55408e96559 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
/*
* Ensure that start/size are aligned to a page boundary.
- * Size is appropriately rounded down, start is rounded up.
+ * Size is rounded down, start is rounded up.
*/
- size -= start & ~PAGE_MASK;
aligned_start = PAGE_ALIGN(start);
+ if (aligned_start > start + size)
+ size = 0;
+ else
+ size -= aligned_start - start;
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..0b0d58a905c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
- /*
- * Check whether this vcpu requires the cache to be flushed on
- * this physical CPU. This is a consequence of doing dcache
- * operations by set/way on this vcpu. We do it here to be in
- * a non-preemptible section.
- */
- if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
- flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
kvm_arm_set_running_vcpu(vcpu);
}
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
- vcpu->arch.last_pcpu = smp_processor_id();
kvm_guest_exit();
trace_kvm_exit(*vcpu_pc(vcpu));
/*
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
return true;
}
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt1);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
- break;
-
- case 10: /* DCCSW */
- asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
*/
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
{
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
BUG_ON(!p->is_write);
vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
if (p->is_64bit)
vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
- return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- access_vm_reg(vcpu, p, r);
-
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
-
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define is64 .is_64 = true
#define is32 .is_64 = false
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+ __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+ __kvm_flush_dcache_pud(pud);
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
{
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM. However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
start_pte = pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
+ pte_t old_pte = *pte;
+
kvm_set_pte(pte, __pte(0));
- put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+ if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
}
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
+ pmd_t old_pmd = *pmd;
+
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pmd(old_pmd);
+
put_page(virt_to_page(pmd));
} else {
unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
+ pud_t old_pud = *pud;
+
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pud(old_pud);
+
put_page(virt_to_page(pud));
} else {
unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
- }
+ if (!pte_none(*pte) &&
+ (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
do {
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
- if (kvm_pmd_huge(*pmd)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
- } else {
+ if (kvm_pmd_huge(*pmd))
+ kvm_flush_dcache_pmd(*pmd);
+ else
stage2_flush_ptes(kvm, pmd, addr, next);
- }
}
} while (pmd++, addr = next, addr != end);
}
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
do {
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
- if (pud_huge(*pud)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
- } else {
+ if (pud_huge(*pud))
+ kvm_flush_dcache_pud(*pud);
+ else
stage2_flush_pmds(kvm, pud, addr, next);
- }
}
} while (pud++, addr = next, addr != end);
}
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
* Go through the stage 2 page tables and invalidate any cache lines
* backing memory already mapped to the VM.
*/
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn);
}
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size, bool uncached)
+{
+ __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
- fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
- fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
unmap_stage2_range(kvm, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ * caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+ unsigned long hcr = vcpu_get_hcr(vcpu);
+
+ /*
+ * If this is the first time we do a S/W operation
+ * (i.e. HCR_TVM not set) flush the whole memory, and set the
+ * VM trapping.
+ *
+ * Otherwise, rely on the VM trapping to wait for the MMU +
+ * Caches to be turned off. At that point, we'll be able to
+ * clean the caches again.
+ */
+ if (!(hcr & HCR_TVM)) {
+ trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+ vcpu_has_cache_enabled(vcpu));
+ stage2_flush_vm(vcpu->kvm);
+ vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+ }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+ bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+ /*
+ * If switching the MMU+caches on, need to invalidate the caches.
+ * If switching it off, need to clean the caches.
+ * Clean + invalidate does the trick always.
+ */
+ if (now_enabled != was_enabled)
+ stage2_flush_vm(vcpu->kvm);
+
+ /* Caches are now on, stop trapping VM ops (until a S/W op) */
+ if (now_enabled)
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+ trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..b6a6e7102201 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_set_way_flush,
+ TP_PROTO(unsigned long vcpu_pc, bool cache),
+ TP_ARGS(vcpu_pc, cache),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, cache )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->cache = cache;
+ ),
+
+ TP_printk("S/W flush at 0x%016lx (cache %s)",
+ __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+ TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+ TP_ARGS(vcpu_pc, was, now),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, was )
+ __field( bool, now )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->was = was;
+ __entry->now = now;
+ ),
+
+ TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+ __entry->vcpu_pc, __entry->was ? "on" : "off",
+ __entry->now ? "on" : "off")
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-at91/board-dt-sama5.c b/arch/arm/mach-at91/board-dt-sama5.c
index 8fb9ef5333f1..97f7367d32b8 100644
--- a/arch/arm/mach-at91/board-dt-sama5.c
+++ b/arch/arm/mach-at91/board-dt-sama5.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/clk-provider.h>
+#include <linux/phy.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -26,8 +27,25 @@
#include "generic.h"
+static int ksz8081_phy_fixup(struct phy_device *phy)
+{
+ int value;
+
+ value = phy_read(phy, 0x16);
+ value &= ~0x20;
+ phy_write(phy, 0x16, value);
+
+ return 0;
+}
+
static void __init sama5_dt_device_init(void)
{
+ if (of_machine_is_compatible("atmel,sama5d4ek") &&
+ IS_ENABLED(CONFIG_PHYLIB)) {
+ phy_register_fixup_for_id("fc028000.etherne:00",
+ ksz8081_phy_fixup);
+ }
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 5951660d1bd2..2daef619d053 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
post_div_table[1].div = 1;
post_div_table[2].div = 1;
video_div_table[1].div = 1;
- video_div_table[2].div = 1;
+ video_div_table[3].div = 1;
}
clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
index 17354a11356f..5a3e5a159e70 100644
--- a/arch/arm/mach-imx/clk-imx6sx.c
+++ b/arch/arm/mach-imx/clk-imx6sx.c
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
}
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 3585cb394e9b..caa21e9b8cd9 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -246,9 +246,14 @@ static int coherency_type(void)
return type;
}
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
int coherency_available(void)
{
- return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+ return false;
}
int __init coherency_init(void)
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 608079a1aba6..b61c049f92d6 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -77,6 +77,24 @@ MACHINE_END
#endif
#ifdef CONFIG_ARCH_OMAP3
+/* Some boards need board name for legacy userspace in /proc/cpuinfo */
+static const char *const n900_boards_compat[] __initconst = {
+ "nokia,omap3-n900",
+ NULL,
+};
+
+DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
+ .reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
+ .init_time = omap3_sync32k_timer_init,
+ .dt_compat = n900_boards_compat,
+ .restart = omap3xxx_restart,
+MACHINE_END
+
+/* Generic omap3 boards, most boards can use these */
static const char *const omap3_boards_compat[] __initconst = {
"ti,omap3430",
"ti,omap3",
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 377eea849e7b..64e44d6d07c0 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -211,6 +211,7 @@ extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
extern struct device *omap4_get_dsp_device(void);
+unsigned int omap4_xlate_irq(unsigned int hwirq);
void omap_gic_of_init(void);
#ifdef CONFIG_CACHE_L2X0
@@ -249,6 +250,7 @@ extern void omap4_cpu_die(unsigned int cpu);
extern struct smp_operations omap4_smp_ops;
extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a3c013345c45..a80ac2d70bb1 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -286,6 +286,10 @@
#define OMAP5XXX_CONTROL_STATUS 0x134
#define OMAP5_DEVICETYPE_MASK (0x7 << 6)
+/* DRA7XX CONTROL CORE BOOTSTRAP */
+#define DRA7_CTRL_CORE_BOOTSTRAP 0x6c4
+#define DRA7_SPEEDSELECT_MASK (0x3 << 8)
+
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4993d4bfe9b2..6d1dffca6c7b 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -22,6 +22,7 @@
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
+#define API_HYP_ENTRY 0x102
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
b secondary_startup
ENDPROC(omap5_secondary_startup)
/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first. This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
+ ldr r0, [r2]
+ mov r0, r0, lsr #5
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
+ bne wait_2
+ ldr r12, =API_HYP_ENTRY
+ adr r0, hyp_boot
+ smc #0
+hyp_boot:
+ b secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
+/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 256e84ef0f67..5305ec7341ec 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -22,6 +22,7 @@
#include <linux/irqchip/arm-gic.h>
#include <asm/smp_scu.h>
+#include <asm/virt.h>
#include "omap-secure.h"
#include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (omap_secure_apis_support())
omap_auxcoreboot_addr(virt_to_phys(startup_addr));
else
- writel_relaxed(virt_to_phys(omap5_secondary_startup),
- base + OMAP_AUX_CORE_BOOT_1);
+ /*
+ * If the boot CPU is in HYP mode then start secondary
+ * CPU in HYP mode as well.
+ */
+ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+ writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
+ else
+ writel_relaxed(virt_to_phys(omap5_secondary_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
}
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b7cb44abe49b..cc30e49a4cc2 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -256,6 +256,38 @@ static int __init omap4_sar_ram_init(void)
}
omap_early_initcall(omap4_sar_ram_init);
+static struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+ irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ return irq;
+}
+
void __init omap_gic_of_init(void)
{
struct device_node *np;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cbb908dc5cf0..9025ffffd2dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3534,9 +3534,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
mpu_irqs_cnt = _count_mpu_irqs(oh);
for (i = 0; i < mpu_irqs_cnt; i++) {
+ unsigned int irq;
+
+ if (oh->xlate_irq)
+ irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+ else
+ irq = (oh->mpu_irqs + i)->irq;
(res + r)->name = (oh->mpu_irqs + i)->name;
- (res + r)->start = (oh->mpu_irqs + i)->irq;
- (res + r)->end = (oh->mpu_irqs + i)->irq;
+ (res + r)->start = irq;
+ (res + r)->end = irq;
(res + r)->flags = IORESOURCE_IRQ;
r++;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 35ca6efbec31..5b42fafcaf55 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -676,6 +676,7 @@ struct omap_hwmod {
spinlock_t _lock;
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
+ unsigned int (*xlate_irq)(unsigned int);
u16 flags;
u8 mpu_rt_idx;
u8 response_lat;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index c314b3c31117..f5e68a782025 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
.class = &omap44xx_dma_hwmod_class,
.clkdm_name = "l3_dma_clkdm",
.mpu_irqs = omap44xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_div_ck",
.prcm = {
.omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.class = &omap44xx_dispc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dispc_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi1_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi2_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
*/
.flags = HWMOD_SWSUP_SIDLE,
.mpu_irqs = omap44xx_dss_hdmi_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
.main_clk = "dss_48mhz_clk",
.prcm = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 3e9523084b2a..7c3fac035e93 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
.class = &omap54xx_dma_hwmod_class,
.clkdm_name = "dma_clkdm",
.mpu_irqs = omap54xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index a8e4b582c527..6163d66102a3 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
u8 nr_irqs;
const struct omap_prcm_irq *irqs;
int irq;
+ unsigned int (*xlate_irq)(unsigned int);
void (*read_pending_irqs)(unsigned long *events);
void (*ocp_barrier)(void);
void (*save_and_clear_irqen)(u32 *saved_mask);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index cc170fb81ff7..408c64efb807 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
.irqs = omap4_prcm_irqs,
.nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
.irq = 11 + OMAP44XX_IRQ_GIC_START,
+ .xlate_irq = omap4_xlate_irq,
.read_pending_irqs = &omap44xx_prm_read_pending_irqs,
.ocp_barrier = &omap44xx_prm_ocp_barrier,
.save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
}
/* Once OMAP4 DT is filled as well */
- if (irq_num >= 0)
+ if (irq_num >= 0) {
omap4_prcm_irq_setup.irq = irq_num;
+ omap4_prcm_irq_setup.xlate_irq = NULL;
+ }
}
omap44xx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 779940cb6e56..dea2833ca627 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
*/
void omap_prcm_irq_cleanup(void)
{
+ unsigned int irq;
int i;
if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
kfree(prcm_irq_setup->priority_mask);
prcm_irq_setup->priority_mask = NULL;
- irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+ if (prcm_irq_setup->xlate_irq)
+ irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+ else
+ irq = prcm_irq_setup->irq;
+ irq_set_chained_handler(irq, NULL);
if (prcm_irq_setup->base_irq > 0)
irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
int offset, i;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ unsigned int irq;
if (!irq_setup)
return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
1 << (offset & 0x1f);
}
- irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+ if (irq_setup->xlate_irq)
+ irq = irq_setup->xlate_irq(irq_setup->irq);
+ else
+ irq = irq_setup->irq;
+ irq_set_chained_handler(irq, omap_prcm_irq_handler);
irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
0);
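
Both hunks above apply the same lookup pattern. A minimal sketch of that pattern, with a hypothetical helper name (the patch itself open-codes it at each call site):

	static unsigned int prcm_irq_number(const struct omap_prcm_irq_setup *s)
	{
		/* Use the optional hook to map a legacy (GIC-offset) IRQ
		 * number to a Linux virq; fall back to the raw number when
		 * no translation is needed.
		 */
		return s->xlate_irq ? s->xlate_irq(s->irq) : s->irq;
	}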
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 4f61148ec168..7d45c84c69ba 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -54,6 +54,7 @@
#include "soc.h"
#include "common.h"
+#include "control.h"
#include "powerdomain.h"
#include "omap-secure.h"
@@ -496,7 +497,8 @@ static void __init realtime_counter_init(void)
void __iomem *base;
static struct clk *sys_clk;
unsigned long rate;
- unsigned int reg, num, den;
+ unsigned int reg;
+ unsigned long long num, den;
base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
if (!base) {
@@ -511,13 +513,42 @@ static void __init realtime_counter_init(void)
}
rate = clk_get_rate(sys_clk);
+
+ if (soc_is_dra7xx()) {
+ /*
+ * Errata i856 says the 32.768KHz crystal does not start at
+ * power on, so the CPU falls back to an emulated 32KHz clock
+ * based on sysclk / 610 instead. This causes the master counter
+ * frequency to be not 6.144MHz but sysclk / 610 * 375 / 2
+ * (or sysclk * 75 / 244).
+ *
+ * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
+ * Of course any board built without a populated 32.768KHz
+ * crystal would also need this fix even if the CPU is fixed
+ * later.
+ *
+ * Either case can be detected by using the two speedselect bits.
+ * If they are not 0, then the 32.768KHz clock driving the
+ * coarse counter that corrects the fine counter every time it
+ * ticks is actually rate/610 rather than 32.768KHz and we
+ * should compensate to avoid the 570ppm (at 20MHz, much worse
+ * at other rates) too fast system time.
+ */
+ reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
+ if (reg & DRA7_SPEEDSELECT_MASK) {
+ num = 75;
+ den = 244;
+ goto sysclk1_based;
+ }
+ }
+
/* Numerator/denumerator values: refer to the TRM Realtime Counter section */
switch (rate) {
- case 1200000:
+ case 12000000:
num = 64;
den = 125;
break;
- case 1300000:
+ case 13000000:
num = 768;
den = 1625;
break;
@@ -529,11 +560,11 @@ static void __init realtime_counter_init(void)
num = 192;
den = 625;
break;
- case 2600000:
+ case 26000000:
num = 384;
den = 1625;
break;
- case 2700000:
+ case 27000000:
num = 256;
den = 1125;
break;
@@ -545,6 +576,7 @@ static void __init realtime_counter_init(void)
break;
}
+sysclk1_based:
/* Program numerator and denumerator registers */
reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
NUMERATOR_DENUMERATOR_MASK;
@@ -556,7 +588,7 @@ static void __init realtime_counter_init(void)
reg |= den;
writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
- arch_timer_freq = (rate / den) * num;
+ arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
set_cntfreq();
iounmap(base);
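
A short worked example of the new calculation, assuming an affected DRA7 with a 20 MHz sysclk (values illustrative; DIV_ROUND_UP_ULL is the kernel macro already used above):

	unsigned long rate = 20000000;			/* 20 MHz sysclk */
	unsigned long long num = 75, den = 244;		/* errata i856 path */
	unsigned long freq = DIV_ROUND_UP_ULL(rate * num, den);
	/* freq == 6147541 Hz, about 576 ppm above the nominal 6.144 MHz --
	 * in line with the roughly 570 ppm drift noted in the comment above.
	 * The unsigned long long types keep rate * num from overflowing
	 * 32 bits for the larger numerators in the switch statement.
	 */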
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4457e731f7a4..292eca0e78ed 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+#ifdef CONFIG_ARCH_OMAP4
void __init omap4_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data,
struct i2c_board_info *devices, int nr_devices)
{
/* PMIC part*/
+ unsigned int irq;
+
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
- omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+ irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+ omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
/* Register additional devices on i2c1 bus if needed */
if (devices)
i2c_register_board_info(1, devices, nr_devices);
}
+#endif
void __init omap_pmic_late_init(void)
{
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d226b71d21d5..a611f4852582 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -19,11 +19,37 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>
#include "core.h"
+#define RK3288_GRF_SOC_CON0 0x244
+
+static void __init rockchip_timer_init(void)
+{
+ if (of_machine_is_compatible("rockchip,rk3288")) {
+ struct regmap *grf;
+
+ /*
+ * Disable auto jtag/sdmmc switching that causes issues
+ * with the mmc controllers making them unreliable
+ */
+ grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
+ if (!IS_ERR(grf))
+ regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
+ else
+ pr_err("rockchip: could not get grf syscon\n");
+ }
+
+ of_clk_init(NULL);
+ clocksource_of_init();
+}
+
static void __init rockchip_dt_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -42,6 +68,7 @@ static const char * const rockchip_board_dt_compat[] = {
DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
+ .init_time = rockchip_timer_init,
.dt_compat = rockchip_board_dt_compat,
.init_machine = rockchip_dt_init,
MACHINE_END
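
One detail worth calling out: RK3288 GRF registers are hiword-mask registers -- bits 31:16 select which of bits 15:0 a write is allowed to change -- so the 0x10000000 written above clears bit 12 of SOC_CON0 (assumed, per the RK3288 TRM, to be the JTAG/SDMMC auto-switch control) without disturbing any other bit. A sketch of the idiom, using a HIWORD_UPDATE helper of the kind found in other Rockchip drivers (the helper is an assumption here, not part of this file):

	#define HIWORD_UPDATE(val, mask, shift) \
		(((val) << (shift)) | ((mask) << ((shift) + 16)))

	/* clear bit 12 (assumed auto-switch control), leave the rest alone */
	regmap_write(grf, RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 0x1, 12));
	/* HIWORD_UPDATE(0, 0x1, 12) == 0x10000000, the constant used above */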
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 79ad93dfdae4..d191cf419731 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
+#endif
/* route signals to GIC */
iowrite32(0x0, pfc_inta_ctrl);
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 170bd146ba17..cef8895a9b82 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
void __init r8a7778_init_irq_dt(void)
{
void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
BUG_ON(!base);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0x73ffffff, base + INT2NTSR0);
__raw_writel(0xffffffff, base + INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6156d172cf31..27dceaf9e688 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
void __init r8a7779_init_irq_dt(void)
{
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0xffffffff, INT2NTSR0);
__raw_writel(0x3fffffff, INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 93ebe3430bfe..fb5e1bb34be8 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
static struct renesas_intc_irqpin_config irqpin0_platform_data = {
.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+ .control_parent = true,
};
static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
static struct renesas_intc_irqpin_config irqpin2_platform_data = {
.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+ .control_parent = true,
};
static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
static struct renesas_intc_irqpin_config irqpin3_platform_data = {
.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+ .control_parent = true,
};
static struct resource irqpin3_resources[] = {
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1c43cec971b5..066688863920 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -85,6 +85,7 @@ vdso_install:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ $(Q)$(MAKE) $(clean)=$(boot)/dts
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 3b8d427c3985..c62b0f4d9ef6 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -3,6 +3,4 @@ dts-dirs += apm
dts-dirs += arm
dts-dirs += cavium
-always := $(dtb-y)
subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index cb3073e4e7a8..d429129ecb3d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -22,7 +22,7 @@
};
chosen {
- stdout-path = &soc_uart0;
+ stdout-path = "serial0:115200n8";
};
psci {
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..3cb4c856b10d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,18 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+}
+
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr_el2 = hcr;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..acd101a9014d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..adcf49547301 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
+ void *va = page_address(pfn_to_page(pfn));
+
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
+ kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b780c6c76eec..23e9432ac112 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 387
+#define __NR_compat_syscalls 388
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 8893cebcea5b..27224426e0bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
__SYSCALL(__NR_memfd_create, sys_memfd_create)
#define __NR_bpf 386
__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c3ca89c27c6b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
+ lsr x1, x1, #12
tlbi ipas2e1is, x1
/*
* We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
- vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..f31e8bb2bc5b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
-static void do_dc_cisw(u32 val)
-{
- asm volatile("dc cisw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
- asm volatile("dc csw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- do_dc_cisw(val);
- break;
-
- case 10: /* DCCSW */
- do_dc_csw(val);
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
- return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- access_vm_reg(vcpu, p, r);
-
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr_el2 &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
-
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index cf33f33333cc..d54dc9ac4b70 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -15,6 +15,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bac492c12fcc..c95464a33f36 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -335,14 +335,8 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
+ if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- }
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 2c9412908024..164efa009e5b 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,12 +19,10 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
-
- vfree(module_region);
}
static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return ret;
}
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *module)
-{
- vfree(module->arch.syminfo);
- module->arch.syminfo = NULL;
-
- return 0;
-}
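
For context on the module_free() removals in this and the following architecture patches: the generic loader now splits the old hook in two, so arch code only cleans up its own per-module data and the region itself is freed by common code. A rough sketch of the generic weak defaults (paraphrased from kernel/module.c of this era; treat the exact bodies as an approximation):

	void __weak module_memfree(void *module_region)
	{
		vfree(module_region);	/* free the mapped module region */
	}

	void __weak module_arch_freeing_init(struct module *mod)
	{
		/* arch hook run before the init sections are freed;
		 * default: nothing to do
		 */
	}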
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
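
The same three-line addition recurs in almost every architecture fault handler below. handle_mm_fault() can now return VM_FAULT_SIGSEGV (introduced so that, for example, stack guard-page violations can be reported as SIGSEGV rather than being faked as SIGBUS), and a handler that only understands OOM and SIGBUS would fall through to BUG(). The shape each handler converges on:

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}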
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 08a313fc2241..f772068d9e79 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
struct timespec *ts)
{
unsigned long flags;
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
int avail;
struct sync_port *port;
unsigned char *start;
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 51123f985eb5..af04cb6b6dc9 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 67b1d1685759..0635bd6c2af3 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->start)
continue;
- pci_claim_resource(dev, idx);
+ pci_claim_bridge_resource(dev, idx);
}
}
pcibios_allocate_bus_resources(&bus->children);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 24603be24c14..29754aae5177 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
#endif /* !USE_BRL */
void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
{
- if (mod && mod->arch.init_unw_table &&
- module_region == mod->module_init) {
+ if (mod->arch.init_unw_table) {
unw_remove_unwind_table(mod->arch.init_unw_table);
mod->arch.init_unw_table = NULL;
}
- vfree(module_region);
}
/* Have we already seen one of these relocations? */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 291a582777cf..900cc93e5409 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
+ int idx;
if (!dev->bus)
- return 0;
-
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || ((busr->flags ^ devr->flags) & type_mask))
- continue;
- if ((devr->start) && (devr->start >= busr->start) &&
- (devr->end <= busr->end))
- return 1;
- }
- return 0;
-}
+ return;
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
- int i;
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = start; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if ((is_valid_resource(dev, i)))
- pci_claim_resource(dev, i);
- }
-}
-void pcibios_fixup_device_resources(struct pci_dev *dev)
-{
- pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+ pci_claim_resource(dev, idx);
+ }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+ int idx;
+
+ if (!dev->bus)
+ return;
+
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_bridge_resource(dev, idx);
+ }
}
/*
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 355
+#define NR_syscalls 356
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
#define __NR_getrandom 352
#define __NR_memfd_create 353
#define __NR_bpf 354
+#define __NR_execveat 355
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf
+ .long sys_execveat /* 355 */
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index b30e41c0c033..48528fb81eff 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
+
}
pr_warn("PCI: Cannot allocate resource region ");
pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9fd6834a2172..5d6139390bf8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1388,7 +1388,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index febb9cd83177..b5b036f64275 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
if (!r->flags)
continue;
if (!r->start ||
- pci_claim_resource(dev, idx) < 0) {
+ pci_claim_bridge_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6b4339f8c9c2..471ff398090c 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
return -ENODEV;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+static void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
-
- if (dev->bus) {
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || (busr->flags ^ devr->flags) & type_mask)
- continue;
-
- if (devr->start &&
- devr->start >= busr->start &&
- devr->end <= busr->end)
- return 1;
- }
- }
+ int idx;
- return 0;
+ if (!dev->bus)
+ return;
+
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_resource(dev, idx);
+ }
}
-static void pcibios_fixup_device_resources(struct pci_dev *dev)
+static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- int limit, i;
+ int idx;
- if (dev->bus->number != 0)
+ if (!dev->bus)
return;
- limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
- PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = 0; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if (is_valid_resource(dev, i))
- pci_claim_resource(dev, i);
+ pci_claim_bridge_resource(dev, idx);
}
}
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
if (bus->self) {
pci_read_bridge_bases(bus);
- pcibios_fixup_device_resources(bus->self);
+ pcibios_fixup_bridge_resources(bus->self);
}
list_for_each_entry(dev, &bus->devices, bus_list)
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index cc924a38f22a..e2e3f13f98d5 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index f9d27883a714..2d0ea25be171 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace; jump to fixed address sigreturn
trampoline on kuser page. */
- regs->ra = (unsigned long) (0x1040);
+ regs->ra = (unsigned long) (0x1044);
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..34429d5a0ccd 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc3f2c1..5822e8e200e6 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
}
#endif
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
-
- vfree(module_region);
}
/* Additional bytes needed in front of individual sections */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index d3feba5a275f..c154cebc1041 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif
#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- /* gcc4, at least, is smart enough to turn this into a single
- * rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+ unsigned long val;
+
+ asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+ return (struct thread_info *)val;
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 37d512d35943..2a525c938158 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
}
pr_warning("PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
- } else if (*flt & VM_FAULT_SIGBUS) {
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..6154b0a2b063 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -437,6 +437,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 1ca125b9c226..d1916b577f2c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -699,7 +699,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
* all cpus at boot. Get these reg values from the current cpu and use the
* same across all cpus.
*/
- uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
uint64_t hid0_val = mfspr(SPRN_HID0);
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..13c6e200b24e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
+ args.token = cpu_to_be32(args.token);
args.nargs = cpu_to_be32(3);
args.nret = cpu_to_be32(1);
args.rets = &args.args[3];
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
- char tod_ext[16]; /* TOD clock for d2fc */
+ char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
static inline notrace unsigned long arch_local_save_flags(void)
{
- return __arch_local_irq_stosm(0x00);
+ return __arch_local_irq_stnsm(0xff);
}
static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
{
- typedef struct { char _[sizeof(clk)]; } addrtype;
+ typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[16];
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
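
The bug fixed here is the classic array-parameter decay: inside the old get_tod_clock_ext(char clk[16]), sizeof(clk) is sizeof(char *) -- 8 on 64-bit s390 -- so the "=Q" constraint only described half of the 16 bytes that STCKE stores. A tiny user-space illustration of the decay (illustrative only, not kernel code):

	#include <stdio.h>

	static void demo(char clk[16])
	{
		/* prints 8 on a 64-bit target, not 16 */
		printf("%zu\n", sizeof(clk));
	}

	int main(void)
	{
		char buf[16];
		demo(buf);
		return 0;
	}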
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
#define __NR_bpf 351
#define __NR_s390_pci_mmio_write 352
#define __NR_s390_pci_mmio_read 353
-#define NR_syscalls 354
+#define __NR_execveat 354
+#define NR_syscalls 355
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b89b59158b95..409d152585be 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -55,14 +55,10 @@ void *module_alloc(unsigned long size)
}
#endif
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
- if (mod) {
- vfree(mod->arch.syminfo);
- mod->arch.syminfo = NULL;
- }
- vfree(module_region);
+ vfree(mod->arch.syminfo);
+ mod->arch.syminfo = NULL;
}
static void check_rela(Elf_Rela *rela, struct module *me)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
return false;
}
+static int check_per_event(unsigned short cause, unsigned long control,
+ struct pt_regs *regs)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return 0;
+ /* user space single step */
+ if (control == 0)
+ return 1;
+ /* over indication for storage alteration */
+ if ((control & 0x20200000) && (cause & 0x2000))
+ return 1;
+ if (cause & 0x8000) {
+ /* all branches */
+ if ((control & 0x80800000) == 0x80000000)
+ return 1;
+ /* branch into selected range */
+ if (((control & 0x80800000) == 0x80800000) &&
+ regs->psw.addr >= current->thread.per_user.start &&
+ regs->psw.addr <= current->thread.per_user.end)
+ return 1;
+ }
+ return 0;
+}
+
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
- /* If per tracing was active generate trap */
- if (regs->psw.mask & PSW_MASK_PER)
- do_per_trap(regs);
+ if (check_per_event(current->thread.per_event.cause,
+ current->thread.per_user.control, regs)) {
+ /* fix per address */
+ current->thread.per_event.address = utask->vaddr;
+ /* trigger per event */
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+ }
return 0;
}
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
+ current->thread.per_event.address = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__rc; \
})
-#define emu_store_ril(ptr, input) \
+#define emu_store_ril(regs, ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ else if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
- else if (put_user(*(input), ptr)) \
+ else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
+ if (__rc == 0) \
+ sim_stor_event(regs, __ptr, mask + 1); \
__rc; \
})
@@ -198,6 +230,25 @@ union split_register {
};
/*
+ * If the user PER registers are set up to trace storage alterations and an
+ * emulated store took place on a matching address, a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return;
+ if (!(current->thread.per_user.control & PER_EVENT_STORE))
+ return;
+ if ((void *)current->thread.per_user.start > (addr + len))
+ return;
+ if ((void *)current->thread.per_user.end < addr)
+ return;
+ current->thread.per_event.address = regs->psw.addr;
+ current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
- rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+ rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
- rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+ rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
- rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+ rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e34122e539a1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
- WARN_ON_ONCE(!irqs_disabled());
-
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..9065d5aa3932 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..3cf8cc03fff6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_to_page((pmd_t *) entry);
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
return page->index + offset;
}
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index 7e45d13816c1..ba44c9f55346 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -22,8 +22,8 @@
* skb_copy_bits takes 4 parameters:
* %r2 = skb pointer
* %r3 = offset into skb data
- * %r4 = length to copy
- * %r5 = pointer to temp buffer
+ * %r4 = pointer to temp buffer
+ * %r5 = length to copy
*/
#define SKBDATA %r8
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
sk_load_word_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,4 # 4 bytes
- la %r5,160(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,160(%r15) # pointer to temp buffer
+ lghi %r5,4 # 4 bytes
brasl %r14,skb_copy_bits # get data from skb
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
sk_load_half_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,162(%r15) # pointer to temp buffer
+ lghi %r5,2 # 2 bytes
brasl %r14,skb_copy_bits # get data from skb
xc 160(2,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
sk_load_byte_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,1 # 1 bytes
- la %r5,163(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
lgr %r2,%r9 # restore %r2
br %r8
- /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
+ /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
ENTRY(sk_load_byte_msh)
llgfr %r1,%r3 # extend offset
clr %r11,%r3 # hlen < offset ?
- jle sk_load_byte_slow
+ jle sk_load_byte_msh_slow
lhi %r12,0
ic %r12,0(%r1,%r10) # get byte from skb
nill %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
sk_load_byte_msh_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r12,160(%r15) # load result from temp buffer
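
The slow paths above are being brought into line with the C prototype of the helper they call: on s390 the first arguments go in %r2-%r5, so the buffer pointer and length had been swapped and the offset in %r3 was never loaded. For reference (prototype from include/linux/skbuff.h):

	int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
	/* s390 calling convention: %r2 = skb, %r3 = offset,
	 * %r4 = to (temp buffer), %r5 = len
	 */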
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..bbd1981cc150 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x88500000, K);
break;
case BPF_ALU | BPF_NEG: /* A = -A */
- /* lnr %r5,%r5 */
- EMIT2(0x1155);
+ /* lcr %r5,%r5 */
+ EMIT2(0x1355);
break;
case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg;
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
mask = 0x800000; /* je */
kbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
- if (K <= 16383)
- /* chi %r5,<K> */
- EMIT4_IMM(0xa75e0000, K);
- else if (test_facility(21))
+ if (test_facility(21))
/* clfi %r5,<K> */
EMIT6_IMM(0xc25f0000, K);
else
- /* c %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5950d000, EMIT_CONST(K));
+ /* cl %r5,<d(K)>(%r13) */
+ EMIT4_DISP(0x5550d000, EMIT_CONST(K));
}
branch: if (filter->jt == filter->jf) {
if (filter->jt == 0)
@@ -502,8 +499,8 @@ branch: if (filter->jt == filter->jf) {
xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
jit->seen |= SEEN_XREG;
- /* cr %r5,%r12 */
- EMIT2(0x195c);
+ /* clr %r5,%r12 */
+ EMIT2(0x155c);
}
goto branch;
case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
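
Two instruction-selection notes on the hunks above: LNR ("load negative") stores minus the absolute value, so it negates positive A but leaves negative A unchanged, whereas LCR ("load complement") is the true two's-complement negation BPF_NEG needs; and CHI/C/CR compare signed values while BPF's K constant and X register are unsigned 32-bit, hence the logical (unsigned) CLFI/CL/CLR forms. A small plain-C illustration of the negation difference (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t A = -5;
		int32_t lnr = -(A < 0 ? -A : A);	/* LNR: -|A| -> -5 (wrong)  */
		int32_t lcr = -A;			/* LCR:  -A ->  5 (correct) */
		printf("lnr=%d lcr=%d\n", lnr, lcr);
		return 0;
	}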
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
else
BUG();
}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index b36365f49478..9ce5afe167ff 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f33e7c7a3bf7..7931eeeb649a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -776,7 +776,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
- module_free(NULL, image);
+ module_memfree(image);
return;
}
memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 96447c9160a0..2305084c9b93 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -74,7 +74,7 @@ error:
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
vfree(module_region);
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
0, 0, 0, NULL, NULL, 0);
/*
- * FIXME: If module_region == mod->module_init, trim exception
+ * FIXME: Add module_arch_freeing_init to trim exception
* table entries.
*/
}
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba397bde7948..0dc9d0144a27 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -857,7 +857,7 @@ source "kernel/Kconfig.preempt"
config X86_UP_APIC
bool "Local APIC support on uniprocessors"
- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
+ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
---help---
A local APIC (Advanced Programmable Interrupt Controller) is an
integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +868,10 @@ config X86_UP_APIC
performance counters), and the NMI watchdog which detects hard
lockups.
+config X86_UP_APIC_MSI
+ def_bool y
+ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
+
config X86_UP_IOAPIC
bool "IO-APIC support on uniprocessors"
depends on X86_UP_APIC
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d999398928bc..ad754b4411f7 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
- perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dcc1c536cc21..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned long output_len,
unsigned long run_size)
{
+ unsigned char *output_orig = output;
+
real_mode = rmode;
sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
debug_putstr("\nDecompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
- handle_relocations(output, output_len);
+ /*
+ * 32-bit always performs relocations. 64-bit relocations are only
+ * needed if kASLR has chosen a different load address.
+ */
+ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
+ handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a225a5ca1037..fd9f6b035b16 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0ab4f9fd2687..3a45668f6dc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
static inline void disable_acpi(void)
{
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 50d033a8947d..a94b82e8f156 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
-#define _LDT_empty(info) \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info) \
((info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
(info)->seg_not_present == 1 && \
(info)->useable == 0)
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+ return (info->base_addr == 0 &&
+ info->limit == 0 &&
+ info->contents == 0 &&
+ info->read_exec_only == 0 &&
+ info->seg_32bit == 0 &&
+ info->limit_in_pages == 0 &&
+ info->seg_not_present == 0 &&
+ info->useable == 0);
+}
static inline void clear_LDT(void)
{
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 40269a2bf6f9..4b75d591eb5e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -130,7 +130,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- mpx_notify_unmap(mm, vma, start, end);
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+ * cacheline in the mm_struct. That can be expensive
+ * enough to be seen in profiles.
+ *
+ * The mpx_notify_unmap() call and its contents have been
+ * observed to affect munmap() performance on hardware
+ * where MPX is not present.
+ *
+ * The unlikely() optimizes for the fast case: no MPX
+ * in the CPU, or no MPX use in the process. Even if
+ * we get this wrong (in the unlikely event that MPX
+ * is widely enabled on some system) the overhead of
+ * MPX itself (reading bounds tables) is expected to
+ * overwhelm the overhead of getting this unlikely()
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+ mpx_notify_unmap(mm, vma, start, end);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d1626364a28a..b9e30daa0881 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
- int irq;
-
- if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
- *irqp = gsi;
- } else {
- mutex_lock(&acpi_ioapic_lock);
- irq = mp_map_gsi_to_irq(gsi,
- IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
- mutex_unlock(&acpi_ioapic_lock);
- if (irq < 0)
- return -1;
- *irqp = irq;
+ int rc, irq, trigger, polarity;
+
+ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ if (rc == 0) {
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+ if (irq >= 0) {
+ *irqp = irq;
+ return 0;
+ }
}
- return 0;
+
+ return -1;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a450373e8e91..939155ffdece 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
.rating = 400, /* use this when running on Hyperv*/
.read = read_hv_clock,
.mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init ms_hyperv_init_platform(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
break;
case 55: /* 22nm Atom "Silvermont" */
+ case 76: /* 14nm Atom "Airmont" */
case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3c895d480cd7..073983398364 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
};
struct event_constraint intel_slm_pebs_event_constraints[] = {
- /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 673f930c700f..c4bb8b8e5017 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var = \
#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+#define RAPL_EVENT_ATTR_STR(_name, v, str) \
+static struct perf_pmu_events_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = str, \
+};
+
struct rapl_pmu {
spinlock_t lock;
int hw_unit; /* 1/2^hw_unit Joule */
@@ -135,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+ return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
}
static u64 rapl_event_update(struct perf_event *event)
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
.attrs = rapl_pmu_attrs,
};
-EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
-EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
-EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
-EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
+static ssize_t rapl_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr = \
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ if (pmu_attr->event_str)
+ return sprintf(page, "%s", pmu_attr->event_str);
+
+ return 0;
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
-EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
-EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
-EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
-EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
/*
* we compute in 0.23 nJ increments regardless of MSR
*/
-EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
static struct attribute *rapl_events_srv_attr[] = {
EVENT_PTR(rapl_cores),
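Editorial aside (not part of the patch): the event/unit/scale strings defined above are exported through sysfs and read back by the perf tool. A minimal user-space sketch of how the scale string is consumed, assuming the RAPL PMU registers under the usual name "power" (the path is an assumption, not taken from this patch):

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* Path assumes the RAPL PMU is registered as "power". */
	FILE *f = fopen("/sys/bus/event_source/devices/power/events/energy-pkg.scale", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("energy-pkg scale: %s", buf);	/* Joules per counter increment */
	fclose(f);
	return 0;
}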
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 10b8d3eaaf15..c635b8b49e93 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
box->phys_id = phys_id;
box->pci_dev = pdev;
box->pmu = pmu;
- uncore_box_init(box);
pci_set_drvdata(pdev, box);
raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
/* called by uncore_cpu_init? */
- if (box && box->phys_id >= 0) {
- uncore_box_init(box);
+ if (box && box->phys_id >= 0)
continue;
- }
for_each_online_cpu(k) {
exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
}
}
- if (box) {
+ if (box)
box->phys_id = phys_id;
- uncore_box_init(box);
- }
}
}
return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 863d9b02563e..6c8c1e7e69d8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
return box->pmu->type->num_counters;
}
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+ if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->init_box)
+ box->pmu->type->ops->init_box(box);
+ }
+}
+
static inline void uncore_disable_box(struct intel_uncore_box *box)
{
if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
static inline void uncore_enable_box(struct intel_uncore_box *box)
{
+ uncore_box_init(box);
+
if (box->pmu->type->ops->enable_box)
box->pmu->type->ops->enable_box(box);
}
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
return box->pmu->type->ops->read_counter(box, event);
}
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
- if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
- if (box->pmu->type->ops->init_box)
- box->pmu->type->ops->init_box(box);
- }
-}
-
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
return (box->phys_id < 0);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2142376dc8c6..8b7b0a51e742 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
}
static inline void tramp_free(void *tramp)
{
- module_free(NULL, tramp);
+ module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 6307a0f0cf17..705ef8d48e2d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
- seq_printf(p, "%*s: ", prec, "THR");
+ seq_printf(p, "%*s: ", prec, "HYP");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
seq_puts(p, " Hypervisor callback interrupts\n");
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7e3cd50ece0..98f654d466e5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
+
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this causes the accounting of the

+ * function graph tracer to get messed up.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->ip - 1);
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ void *saved_sp = kcb->jprobe_saved_sp;
if ((addr > (u8 *) jprobe_return) &&
(addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+ if (stack_addr(regs) != saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk(KERN_ERR
"current sp %p does not match saved sp %p\n",
- stack_addr(regs), kcb->jprobe_saved_sp);
+ stack_addr(regs), saved_sp);
printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
show_regs(saved_regs);
printk(KERN_ERR "Current registers\n");
show_regs(regs);
BUG();
}
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
- kcb->jprobes_stack,
- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 4e942f31b1a7..7fc5e843f247 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -29,7 +29,28 @@ static int get_free_idx(void)
static bool tls_desc_okay(const struct user_desc *info)
{
- if (LDT_empty(info))
+ /*
+ * For historical reasons (i.e. no one ever documented how any
+ * of the segmentation APIs work), user programs can and do
+ * assume that a struct user_desc that's all zeros except for
+ * entry_number means "no segment at all". This never actually
+ * worked. In fact, up to Linux 3.19, a struct user_desc like
+ * this would create a 16-bit read-write segment with base and
+ * limit both equal to zero.
+ *
+ * That was close enough to "no segment at all" until we
+ * hardened this function to disallow 16-bit TLS segments. Fix
+ * it up by interpreting these zeroed segments the way that they
+ * were almost certainly intended to be interpreted.
+ *
+ * The correct way to ask for "no segment at all" is to specify
+ * a user_desc that satisfies LDT_empty. To keep everything
+ * working, we accept both.
+ *
+ * Note that there's a similar kludge in modify_ldt -- look at
+ * the distinction between modes 1 and 0x11.
+ */
+ if (LDT_empty(info) || LDT_zero(info))
return true;
/*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
cpu = get_cpu();
while (n-- > 0) {
- if (LDT_empty(info))
+ if (LDT_empty(info) || LDT_zero(info))
desc->a = desc->b = 0;
else
fill_ldt(desc, info);
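Editorial sketch (not part of the patch): the comment above describes the user-space idiom this change keeps working -- a struct user_desc that is all zeros except for entry_number, handed to set_thread_area() to mean "no segment at all". Such a descriptor satisfies the new LDT_zero() check but not LDT_empty(), which requires seg_not_present == 1. Roughly:

#include <asm/ldt.h>		/* struct user_desc (UAPI header) */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Clear a TLS slot the way many existing programs do. */
static int clear_tls_slot(int entry_number)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));	/* all fields zero: matches LDT_zero() */
	desc.entry_number = entry_number;

	return syscall(SYS_set_thread_area, &desc);
}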
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b7e50bba3bbb..505449700e0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
goto success;
}
}
- pr_err("Fast TSC calibration failed\n");
+ pr_info("Fast TSC calibration failed\n");
return 0;
success:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 169b09d76ddd..de12c1d379f1 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2348,7 +2348,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
@@ -2359,25 +2359,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
- switch (ctxt->mode) {
- case X86EMUL_MODE_PROT32:
- if ((msr_data & 0xfffc) == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- case X86EMUL_MODE_PROT64:
- if (msr_data == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- default:
- break;
- }
+ if ((msr_data & 0xfffc) == 0x0)
+ return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data;
- cs_sel &= ~SELECTOR_RPL_MASK;
+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
- ss_sel &= ~SELECTOR_RPL_MASK;
- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+ if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
@@ -2386,10 +2374,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- ctxt->_eip = msr_data;
+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
return X86EMUL_CONTINUE;
}
@@ -3791,8 +3780,8 @@ static const struct opcode group5[] = {
};
static const struct opcode group6[] = {
- DI(Prot, sldt),
- DI(Prot, str),
+ DI(Prot | DstMem, sldt),
+ DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..d52dcf0776ea 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
u16 cid, lid;
u32 ldr, aid;
+ if (!kvm_apic_present(vcpu))
+ continue;
+
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..e3ff27a5b634 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 08a7d313538a..079c3b6a3ff1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
[_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
[_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
};
-EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+EXPORT_SYMBOL(__cachemode2pte_tbl);
uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
-EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+EXPORT_SYMBOL(__pte2cachemode_tbl);
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 67ebf5751222..c439ec478216 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -349,6 +349,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
return MPX_INVALID_BOUNDS_DIR;
/*
+ * 32-bit binaries on 64-bit kernels are currently
+ * unsupported.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
+ return MPX_INVALID_BOUNDS_DIR;
+ /*
* The bounds directory pointer is stored in a register
* only accessible if we first do an xsave.
*/
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edf299c8ff6c..7ac68698406c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -234,8 +234,13 @@ void pat_init(void)
PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
/* Boot CPU check */
- if (!boot_pat_state)
+ if (!boot_pat_state) {
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+ if (!boot_pat_state) {
+ pat_disable("PAT read always returns zero, disabled.");
+ return;
+ }
+ }
wrmsrl(MSR_IA32_CR_PAT, pat);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 9b18ef315a55..349c0d32cc0b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
continue;
if (r->parent) /* Already allocated */
continue;
- if (!r->start || pci_claim_resource(dev, idx) < 0) {
+ if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c489ef2c1a39..9098d880c476 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -458,6 +458,7 @@ int __init pci_xen_hvm_init(void)
* just how GSIs get registered.
*/
__acpi_register_gsi = acpi_register_gsi_xen_hvm;
+ __acpi_unregister_gsi = NULL;
#endif
#ifdef CONFIG_PCI_MSI
@@ -471,52 +472,6 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static __init void xen_setup_acpi_sci(void)
-{
- int rc;
- int trigger, polarity;
- int gsi = acpi_sci_override_gsi;
- int irq = -1;
- int gsi_override = -1;
-
- if (!gsi)
- return;
-
- rc = acpi_get_override_irq(gsi, &trigger, &polarity);
- if (rc) {
- printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
- " sci, rc=%d\n", rc);
- return;
- }
- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
- printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
- "polarity=%d\n", gsi, trigger, polarity);
-
- /* Before we bind the GSI to a Linux IRQ, check whether
- * we need to override it with bus_irq (IRQ) value. Usually for
- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
- * but there are oddballs where the IRQ != GSI:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
- * which ends up being: gsi_to_irq[9] == 20
- * (which is what acpi_gsi_to_irq ends up calling when starting the
- * the ACPI interpreter and keels over since IRQ 9 has not been
- * setup as we had setup IRQ 20 for it).
- */
- if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- /* Use the provided value if it's valid. */
- if (irq >= 0)
- gsi_override = irq;
- }
-
- gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
- printk(KERN_INFO "xen: acpi sci %d\n", gsi);
-
- return;
-}
-
int __init pci_xen_initial_domain(void)
{
int irq;
@@ -527,8 +482,8 @@ int __init pci_xen_initial_domain(void)
x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
pci_msi_ignore_mask = 1;
#endif
- xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
+ __acpi_unregister_gsi = NULL;
/* Pre-allocate legacy irqs */
for (irq = 0; irq < nr_legacy_irqs(); irq++) {
int trigger, polarity;
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
- my $size = hex($1);
- my $offset = hex($2);
- $mem_size += $size;
- if ($file_offset == 0) {
- $file_offset = $offset;
- } elsif ($file_offset != $offset) {
- # BFD linker shows the same file offset in ELF.
- # Gold linker shows them as consecutive.
- next if ($file_offset + $mem_size == $offset + $size);
-
- printf STDERR "file_offset: 0x%lx\n", $file_offset;
- printf STDERR "mem_size: 0x%lx\n", $mem_size;
- printf STDERR "offset: 0x%lx\n", $offset;
- printf STDERR "size: 0x%lx\n", $size;
-
- die ".bss and .brk are non-contiguous\n";
- }
- }
-}
-
-if ($file_offset == 0) {
- die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+ echo "Never found .bss or .brk file offset" >&2
+ exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+ # Gold linker shows them as consecutive.
+ endB=$(( $offsetB + $sizeB ))
+ if [ "$endB" != "$run_size" ] ; then
+ printf "sizeA: 0x%x\n" $sizeA >&2
+ printf "offsetA: 0x%x\n" $offsetA >&2
+ printf "sizeB: 0x%x\n" $sizeB >&2
+ printf "offsetB: 0x%x\n" $offsetB >&2
+ echo ".bss and .brk are non-contiguous" >&2
+ exit 1
+ fi
+fi
+
+printf "%d\n" $run_size
+exit 0
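Editorial aside (not part of the patch): the script's arithmetic is simply the file offset of the first of .bss/.brk plus both section sizes, printed as a decimal for mkpiggy. With made-up values for illustration (a hypothetical .bss at offset 0x01400000 of size 0x000e0000 followed by a 0x00020000-byte .brk), the equivalent computation in C:

#include <stdio.h>

int main(void)
{
	/* Hypothetical objdump -h values, for illustration only. */
	unsigned long bss_offset = 0x01400000;	/* file offset of .bss */
	unsigned long bss_size   = 0x000e0000;
	unsigned long brk_size   = 0x00020000;

	/* RUN_SIZE, printed as a decimal number for mkpiggy. */
	printf("%lu\n", bss_offset + bss_size + brk_size);
	return 0;
}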
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..78a881b7fc41 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
@@ -66,6 +67,7 @@
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static unsigned char xen_get_nmi_reason(void)
+{
+ unsigned char reason = 0;
+
+ /* Construct a value which looks like it came from port 0x61. */
+ if (test_bit(_XEN_NMIREASON_io_error,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_IOCHK;
+ if (test_bit(_XEN_NMIREASON_pci_serr,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_SERR;
+
+ return reason;
+}
+
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
pv_info = xen_info;
pv_init_ops = xen_init_ops;
pv_apic_ops = xen_apic_ops;
- if (!xen_pvh_domain())
+ if (!xen_pvh_domain()) {
pv_cpu_ops = xen_cpu_ops;
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+ }
+
if (xen_feature(XENFEAT_auto_translated_physmap))
x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
else
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..70fb5075c901 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
{
- BUG_ON(!slab_is_available());
+ if (unlikely(!slab_is_available())) {
+ free_bootmem((unsigned long)p, PAGE_SIZE);
+ return;
+ }
+
free_page((unsigned long)p);
}
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
p2m_missing_pte : p2m_identity_pte;
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pmdp = populate_extra_pmd(
- (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ (unsigned long)(p2m + pfn) + i * PMD_SIZE);
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
}
}
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
* a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
* pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
*/
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
pte_t *ptechk;
- pte_t *pteret = ptep;
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
pmd_t *pmdp;
unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
if (ptechk == pte_pg) {
set_pmd(pmdp,
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
- if (vaddr == (addr & ~(PMD_SIZE - 1)))
- pteret = pte_offset_kernel(pmdp, addr);
pte_newpg[i] = NULL;
}
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
vaddr += PMD_SIZE;
}
- return pteret;
+ return lookup_address(addr, &level);
}
/*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
/* PMD level is missing, allocate a new one */
- ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ ptep = alloc_p2m_pmd(addr, pte_pg);
if (!ptep)
return false;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..865e56cea7a0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
int i;
- unsigned long addr = PFN_PHYS(pfn);
+ phys_addr_t addr = PFN_PHYS(pfn);
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (!xen_extra_mem[i].size)
+ continue;
pfn_s = PFN_DOWN(xen_extra_mem[i].start);
pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
- unsigned long *released)
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
- unsigned long len = 0;
unsigned long pfn, end;
int ret;
WARN_ON(start_pfn > end_pfn);
+ /* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
+ (*released)++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
- len++;
} else
break;
}
- /* Need to release pages first */
- *released += len;
- *identity += set_phys_range_identity(start_pfn, end_pfn);
+ set_phys_range_identity(start_pfn, end_pfn);
}
/*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
}
/* Update kernel mapping, but not for highmem. */
- if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long ident_cnt = 0;
unsigned int i, chunk;
WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
xen_remap_mfn = mfn;
/* Set identity map */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + chunk);
+ set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
left -= chunk;
}
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
unsigned long pfn;
unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
- *identity += set_phys_range_identity(cur_pfn,
- cur_pfn + size);
+ set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
+ cur_pfn + left, nr_pages, released);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
- *identity += size;
+ *remapped += size;
}
/*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
- unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
phys_addr_t start = 0;
- unsigned long identity = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
+ unsigned long num_remapped = 0;
int i;
/*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &num_released);
+ &num_released, &num_remapped);
start = end;
}
}
*released = num_released;
+ *remapped = num_remapped;
- pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
pr_info("Released %ld page(s)\n", num_released);
}
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long remapped_pages;
int i;
int op;
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
* underlying RAM.
*/
xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ &xen_released_pages, &remapped_pages);
extra_pages += xen_released_pages;
+ extra_pages += remapped_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f473d268d387..69087341d9ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
struct xen_clock_event_device {
struct clock_event_device evt;
- char *name;
+ char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
- kfree(per_cpu(xen_clock_events, cpu).name);
- per_cpu(xen_clock_events, cpu).name = NULL;
}
}
void xen_setup_timer(int cpu)
{
- char *name;
- struct clock_event_device *evt;
+ struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+ struct clock_event_device *evt = &xevt->evt;
int irq;
- evt = &per_cpu(xen_clock_events, cpu).evt;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
- name = kasprintf(GFP_KERNEL, "timer%d", cpu);
- if (!name)
- name = "<timer kasprintf failed>";
+ snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
- name, NULL);
+ xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
- per_cpu(xen_clock_events, cpu).name = name;
}
void xen_setup_cpu_clockevents(void)
{
- BUG_ON(preemptible());
-
clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();