Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch  240
1 file changed, 0 insertions, 240 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch
deleted file mode 100644
index cb74bad4..00000000
--- a/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch
+++ /dev/null
@@ -1,240 +0,0 @@
-From cbf0028f2c499e981af020c1cdb6bff7d0b4e192 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 9 May 2018 21:53:09 +0200
-Subject: [PATCH 68/93] x86/speculation: Handle HT correctly on AMD
-
-commit 1f50ddb4f4189243c05926b842dc1a0332195f31 upstream
-
-The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when
-hyperthreading is enabled the SSBD bit toggle needs to take both cores into
-account. Otherwise the following situation can happen:
-
-CPU0                            CPU1
-
-disable SSB
-                                disable SSB
-                                enable SSB  <- Enables it for the Core, i.e. for CPU0 as well
-
-So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
-again.
-
-On Intel the SSBD control is per core as well, but the synchronization
-logic is implemented behind the per thread SPEC_CTRL MSR. It works like
-this:
-
- CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
-
-i.e. if one of the threads enables a mitigation then this affects both and
-the mitigation is only disabled in the core when both threads disabled it.
-
-Add the necessary synchronization logic for AMD family 17H. Unfortunately
-that requires a spinlock to serialize the access to the MSR, but the locks
-are only shared between siblings.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/x86/include/asm/spec-ctrl.h | 6 ++
- arch/x86/kernel/process.c | 125 +++++++++++++++++++++++++++++++++++++--
- arch/x86/kernel/smpboot.c | 5 ++
- 3 files changed, 130 insertions(+), 6 deletions(-)
-
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
-index dc21209..0cb49c4 100644
---- a/arch/x86/include/asm/spec-ctrl.h
-+++ b/arch/x86/include/asm/spec-ctrl.h
-@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
- return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
- }
-
-+#ifdef CONFIG_SMP
-+extern void speculative_store_bypass_ht_init(void);
-+#else
-+static inline void speculative_store_bypass_ht_init(void) { }
-+#endif
-+
- extern void speculative_store_bypass_update(void);
-
- #endif
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index b3cd08e..1e9d155 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -203,22 +203,135 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
- }
- }
-
--static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
-+#ifdef CONFIG_SMP
-+
-+struct ssb_state {
-+ struct ssb_state *shared_state;
-+ raw_spinlock_t lock;
-+ unsigned int disable_state;
-+ unsigned long local_state;
-+};
-+
-+#define LSTATE_SSB 0
-+
-+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
-+
-+void speculative_store_bypass_ht_init(void)
- {
-- u64 msr;
-+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
-+ unsigned int this_cpu = smp_processor_id();
-+ unsigned int cpu;
-+
-+ st->local_state = 0;
-+
-+ /*
-+ * Shared state setup happens once on the first bringup
-+ * of the CPU. It's not destroyed on CPU hotunplug.
-+ */
-+ if (st->shared_state)
-+ return;
-+
-+ raw_spin_lock_init(&st->lock);
-
-- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
-- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
-+ /*
-+ * Go over HT siblings and check whether one of them has set up the
-+ * shared state pointer already.
-+ */
-+ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
-+ if (cpu == this_cpu)
-+ continue;
-+
-+ if (!per_cpu(ssb_state, cpu).shared_state)
-+ continue;
-+
-+ /* Link it to the state of the sibling: */
-+ st->shared_state = per_cpu(ssb_state, cpu).shared_state;
-+ return;
-+ }
-+
-+ /*
-+ * First HT sibling to come up on the core. Link shared state of
-+ * the first HT sibling to itself. The siblings on the same core
-+ * which come up later will see the shared state pointer and link
-+ * themselves to the state of this CPU.
-+ */
-+ st->shared_state = st;
-+}
-+
-+/*
-+ * Logic is: First HT sibling enables SSBD for both siblings in the core
-+ * and the last sibling to disable it disables it for the whole core. This is how
-+ * MSR_SPEC_CTRL works in "hardware":
-+ *
-+ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
-+ */
-+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
-+{
-+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
-+ u64 msr = x86_amd_ls_cfg_base;
-+
-+ if (!static_cpu_has(X86_FEATURE_ZEN)) {
-+ msr |= ssbd_tif_to_amd_ls_cfg(tifn);
- wrmsrl(MSR_AMD64_LS_CFG, msr);
-+ return;
-+ }
-+
-+ if (tifn & _TIF_SSBD) {
-+ /*
-+ * Since this can race with prctl(), block reentry on the
-+ * same CPU.
-+ */
-+ if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
-+ return;
-+
-+ msr |= x86_amd_ls_cfg_ssbd_mask;
-+
-+ raw_spin_lock(&st->shared_state->lock);
-+ /* First sibling enables SSBD: */
-+ if (!st->shared_state->disable_state)
-+ wrmsrl(MSR_AMD64_LS_CFG, msr);
-+ st->shared_state->disable_state++;
-+ raw_spin_unlock(&st->shared_state->lock);
- } else {
-- msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-+ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
-+ return;
-+
-+ raw_spin_lock(&st->shared_state->lock);
-+ st->shared_state->disable_state--;
-+ if (!st->shared_state->disable_state)
-+ wrmsrl(MSR_AMD64_LS_CFG, msr);
-+ raw_spin_unlock(&st->shared_state->lock);
- }
- }
-+#else
-+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
-+{
-+ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
-+
-+ wrmsrl(MSR_AMD64_LS_CFG, msr);
-+}
-+#endif
-+
-+static __always_inline void intel_set_ssb_state(unsigned long tifn)
-+{
-+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-+
-+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-+}
-+
-+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
-+{
-+ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-+ amd_set_core_ssb_state(tifn);
-+ else
-+ intel_set_ssb_state(tifn);
-+}
-
- void speculative_store_bypass_update(void)
- {
-+ preempt_disable();
- __speculative_store_bypass_update(current_thread_info()->flags);
-+ preempt_enable();
- }
-
- void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 36171bc..c898a69 100644
---- a/arch/x86/kernel/smpboot.c
-+++ b/arch/x86/kernel/smpboot.c
-@@ -75,6 +75,7 @@
- #include <asm/i8259.h>
- #include <asm/realmode.h>
- #include <asm/misc.h>
-+#include <asm/spec-ctrl.h>
-
- /* Number of siblings per CPU package */
- int smp_num_siblings = 1;
-@@ -237,6 +238,8 @@ static void notrace start_secondary(void *unused)
- */
- check_tsc_sync_target();
-
-+ speculative_store_bypass_ht_init();
-+
- /*
- * Lock vector_lock and initialize the vectors on this cpu
- * before setting the cpu online. We must set it online with
-@@ -1333,6 +1336,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
- set_mtrr_aps_delayed_init();
-
- smp_quirk_init_udelay();
-+
-+ speculative_store_bypass_ht_init();
- }
-
- void arch_enable_nonboot_cpus_begin(void)
---
-2.7.4
-
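The heart of the patch being removed here is the first-enable/last-disable accounting in amd_set_core_ssb_state(). Below is a minimal userspace sketch of that logic, assuming a pthread mutex in place of raw_spinlock_t and a printf() in place of the wrmsrl() to MSR_AMD64_LS_CFG; the names core_state, thread_state, set_ssbd and write_core_msr are illustrative, not kernel API.

/*
 * Userspace model of the first-enable/last-disable accounting from
 * amd_set_core_ssb_state().  Illustrative only: a pthread mutex stands
 * in for raw_spinlock_t, printf() for the MSR write, and none of these
 * names are kernel interfaces.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct core_state {
    pthread_mutex_t lock;
    unsigned int disable_count;   /* siblings that currently want SSBD */
};

struct thread_state {
    struct core_state *core;      /* shared between HT siblings */
    bool local_ssbd;              /* this thread's LSTATE_SSB bit */
};

/* Stand-in for the MSR write that flips SSBD for the whole core. */
static void write_core_msr(bool ssbd)
{
    printf("core MSR: SSBD %s\n", ssbd ? "on" : "off");
}

static void set_ssbd(struct thread_state *st, bool want)
{
    if (want) {
        /* Block reentry on the same thread, as the patch does with
         * __test_and_set_bit(LSTATE_SSB, ...). */
        if (st->local_ssbd)
            return;
        st->local_ssbd = true;

        pthread_mutex_lock(&st->core->lock);
        /* Only the first sibling to enable writes the MSR. */
        if (st->core->disable_count++ == 0)
            write_core_msr(true);
        pthread_mutex_unlock(&st->core->lock);
    } else {
        if (!st->local_ssbd)
            return;
        st->local_ssbd = false;

        pthread_mutex_lock(&st->core->lock);
        /* Only the last sibling to disable clears the MSR. */
        if (--st->core->disable_count == 0)
            write_core_msr(false);
        pthread_mutex_unlock(&st->core->lock);
    }
}

int main(void)
{
    struct core_state core = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct thread_state cpu0 = { &core, false };
    struct thread_state cpu1 = { &core, false };

    set_ssbd(&cpu0, true);   /* first enable  -> MSR written (on)   */
    set_ssbd(&cpu1, true);   /* second enable -> no MSR write       */
    set_ssbd(&cpu1, false);  /* cpu0 still protected -> no write    */
    set_ssbd(&cpu0, false);  /* last disable  -> MSR written (off)  */
    return 0;
}

Run in the order shown in main(), this reproduces the commit message's CPU0/CPU1 scenario with the fix applied: cpu1's request to re-enable store bypass is silent because cpu0 still holds a reference, so one sibling can no longer strip the other's protection.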
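The one-time sibling linking done by speculative_store_bypass_ht_init() can be modeled the same way. In the sketch below, NR_CPUS and sibling_of() are assumptions standing in for the kernel's topology_sibling_cpumask(); the fixed "CPUs n and n^1 are HT siblings" topology is purely for illustration.

/*
 * Userspace model of the one-time sibling linking in
 * speculative_store_bypass_ht_init().  NR_CPUS and sibling_of() are
 * made-up stand-ins for topology_sibling_cpumask().
 */
#include <pthread.h>
#include <stddef.h>

#define NR_CPUS 4

struct ssb_state {
    struct ssb_state *shared_state;   /* points at the core's owner */
    pthread_mutex_t lock;
    unsigned int disable_state;
};

static struct ssb_state cpu_ssb[NR_CPUS];

/* Assumed topology: CPUs n and n^1 share a physical core. */
static int sibling_of(int cpu)
{
    return cpu ^ 1;
}

static void ssb_ht_init(int cpu)
{
    struct ssb_state *st = &cpu_ssb[cpu];
    int sib = sibling_of(cpu);

    /* Set up once per core; survives CPU hotunplug, as in the patch. */
    if (st->shared_state)
        return;

    pthread_mutex_init(&st->lock, NULL);

    /* If a sibling came up first, link to its state... */
    if (cpu_ssb[sib].shared_state) {
        st->shared_state = cpu_ssb[sib].shared_state;
        return;
    }

    /* ...otherwise this CPU becomes the core's shared state. */
    st->shared_state = st;
}

int main(void)
{
    ssb_ht_init(0);   /* CPU0 first: owns the shared state */
    ssb_ht_init(1);   /* CPU1 links to CPU0                */
    ssb_ht_init(1);   /* second bringup is a no-op         */

    return cpu_ssb[1].shared_state == cpu_ssb[0].shared_state ? 0 : 1;
}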