path: root/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch  163
1 file changed, 163 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
new file mode 100644
index 00000000..84d35057
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
@@ -0,0 +1,163 @@
+From f8a3968ae9a100977e28f434f303fd74a0a8591b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:21:36 +0200
+Subject: [PATCH 65/93] x86/cpufeatures: Disentangle SSBD enumeration
+
+commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream
+
+The SSBD enumeration is, like the other speculation-control bits, magically
+shared between Intel and AMD, although the underlying mechanisms differ.
+
+Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific
+features or family dependent setup.
+
+Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
+controlled via MSR_SPEC_CTRL and fix up the usage sites.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 5 +++--
+ arch/x86/kernel/cpu/amd.c | 7 +------
+ arch/x86/kernel/cpu/bugs.c | 10 +++++-----
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ arch/x86/kernel/process.c | 2 +-
+ 6 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index ca0f33f..d071767 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -198,6 +198,7 @@
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+@@ -207,7 +208,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+@@ -314,7 +315,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index acb2fcc..179d572 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -558,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+@@ -848,11 +848,6 @@ static void init_amd(struct cpuinfo_x86 *c)
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+-
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+- set_cpu_cap(c, X86_FEATURE_SSBD);
+- set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+- }
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 59649310..15a6c58 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void)
+ {
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 04362282..945e841 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,6 +735,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 7f495e8..93781e3 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -156,6 +156,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
++ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c344230..b3cd08e 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -207,7 +207,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+--
+2.7.4
+
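Taken as a whole, the patch makes X86_FEATURE_SSBD a synthetic capability that is derived from whichever vendor mechanism is present: Intel enumerates X86_FEATURE_SPEC_CTRL_SSBD via CPUID, AMD forces X86_FEATURE_LS_CFG_SSBD when the LS_CFG MSR is usable, and both paths set the synthetic bit that generic code tests. The following is a minimal standalone C sketch of that flow, written for illustration only: it is not the kernel code, and the enum values, struct, and helper names are simplified stand-ins, not real kernel identifiers.

/*
 * Simplified model of the enumeration flow this patch establishes.
 * All names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum feature {
	FEAT_SPEC_CTRL_SSBD,	/* Intel: SSBD controlled via MSR_SPEC_CTRL */
	FEAT_LS_CFG_SSBD,	/* AMD: SSBD controlled via MSR_AMD64_LS_CFG */
	FEAT_SSBD,		/* synthetic: "some SSBD mechanism exists" */
	FEAT_MAX
};

struct cpu {
	bool cap[FEAT_MAX];
};

/* Mirrors the init_speculation_control() hunk: the Intel CPUID bit
 * sets the synthetic SSBD bit. */
static void init_speculation_control(struct cpu *c)
{
	if (c->cap[FEAT_SPEC_CTRL_SSBD])
		c->cap[FEAT_SSBD] = true;
}

/* Mirrors the bsp_init_amd() hunk: when the LS_CFG MSR can be read,
 * both the AMD-specific bit and the synthetic bit are forced on. */
static void bsp_init_amd(struct cpu *c, bool ls_cfg_msr_ok)
{
	if (ls_cfg_msr_ok) {
		c->cap[FEAT_LS_CFG_SSBD] = true;
		c->cap[FEAT_SSBD] = true;
	}
}

int main(void)
{
	struct cpu intel = { .cap = { [FEAT_SPEC_CTRL_SSBD] = true } };
	struct cpu amd = { 0 };

	init_speculation_control(&intel);
	bsp_init_amd(&amd, true);

	/* Generic code only asks whether SSBD exists at all... */
	printf("intel SSBD: %d, amd SSBD: %d\n",
	       intel.cap[FEAT_SSBD], amd.cap[FEAT_SSBD]);
	/* ...while the MSR write sites branch on the vendor-specific bits,
	 * as in __speculative_store_bypass_update() and x86_amd_ssb_disable(). */
	printf("amd uses LS_CFG path: %d\n", amd.cap[FEAT_LS_CFG_SSBD]);
	return 0;
}

The design point the sketch captures is the separation of concerns: code that only needs to know "SSBD is available" tests the synthetic bit, while code that has to program a specific MSR tests the vendor bit, so neither Intel nor AMD details leak into the other's paths.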