Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-Use-Indirect-Branch-Prediction-Barri.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-Use-Indirect-Branch-Prediction-Barri.patch | 129
1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-Use-Indirect-Branch-Prediction-Barri.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-Use-Indirect-Branch-Prediction-Barri.patch
new file mode 100644
index 00000000..90877ac8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-Use-Indirect-Branch-Prediction-Barri.patch
@@ -0,0 +1,129 @@
+From b3ad1b7521b3f4aaddc02e93ce3835bcac48da35 Mon Sep 17 00:00:00 2001
+From: Tim Chen <tim.c.chen@linux.intel.com>
+Date: Mon, 29 Jan 2018 22:04:47 +0000
+Subject: [PATCH 06/14] x86/speculation: Use Indirect Branch Prediction Barrier
+ in context switch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 18bf3c3ea8ece8f03b6fc58508f2dfd23c7711c7 upstream.
+
+Flush indirect branches when switching into a process that has marked itself
+non-dumpable. This protects high-value processes like gpg better,
+without incurring too much performance overhead.
+
+If done naïvely, we could switch to a kernel idle thread and then back
+to the original process, such as:
+
+ process A -> idle -> process A
+
+In such a scenario, we do not have to do IBPB here even though the process
+is non-dumpable, as we are switching back to the same process after a
+hiatus.
+
+To avoid the redundant IBPB, which is expensive, we track the last user
+mm's context ID. The cost is an extra per-CPU u64 mm context id that records
+the last mm we were using before switching to the init_mm used by idle.
+Avoiding the extra IBPB is probably worth the extra memory for this
+common scenario.
+
+For those cases where tlb_defer_switch_to_init_mm() returns true (non-PCID),
+lazy TLB will defer the switch to init_mm, so we will not be changing
+the mm for the process A -> idle -> process A switch, and IBPB will be
+skipped in this case.
+
+Thanks to the reviewers and Andy Lutomirski for the suggestion of
+using ctx_id, which got rid of the problem of mm pointer recycling.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: linux@dominikbrodowski.net
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: luto@kernel.org
+Cc: pbonzini@redhat.com
+Link: https://lkml.kernel.org/r/1517263487-3708-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/tlbflush.h | 2 ++
+ arch/x86/mm/tlb.c | 31 +++++++++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 94146f6..99185a0 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
+ struct tlb_state {
+ struct mm_struct *active_mm;
+ int state;
++ /* last user mm's ctx id */
++ u64 last_ctx_id;
+
+ /*
+ * Access to this CR4 shadow and to H/W CR4 is protected by
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 146e842..b1bf41b 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -10,6 +10,7 @@
+
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
++#include <asm/nospec-branch.h>
+ #include <asm/cache.h>
+ #include <asm/apic.h>
+ #include <asm/uv/uv.h>
+@@ -106,6 +107,28 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ unsigned cpu = smp_processor_id();
+
+ if (likely(prev != next)) {
++ u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
++
++ /*
++ * Avoid user/user BTB poisoning by flushing the branch
++ * predictor when switching between processes. This stops
++ * one process from doing Spectre-v2 attacks on another.
++ *
++ * As an optimization, flush indirect branches only when
++ * switching into processes that disable dumping. This
++ * protects high value processes like gpg, without having
++ * too high performance overhead. IBPB is *expensive*!
++ *
++ * This will not flush branches when switching into kernel
++ * threads. It will also not flush if we switch to idle
++ * thread and back to the same process. It will flush if we
++ * switch to a different non-dumpable process.
++ */
++ if (tsk && tsk->mm &&
++ tsk->mm->context.ctx_id != last_ctx_id &&
++ get_dumpable(tsk->mm) != SUID_DUMP_USER)
++ indirect_branch_prediction_barrier();
++
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ /*
+ * If our current stack is in vmalloc space and isn't
+@@ -120,6 +143,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+ }
+
++ /*
++ * Record last user mm's context id, so we can avoid
++ * flushing branch buffer with IBPB if we switch back
++ * to the same user.
++ */
++ if (next != &init_mm)
++ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
++
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ this_cpu_write(cpu_tlbstate.active_mm, next);
+
+--
+2.7.4
+
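Taken together, the two hunks in this patch add a per-CPU last_ctx_id and gate the barrier on it. The sketch below is a minimal userspace model of that decision, not kernel code: mm_ctx, cpu_state, would_issue_ibpb() and the hard-coded switch sequence are illustrative stand-ins for mm_struct, cpu_tlbstate and the check added to switch_mm_irqs_off(), under the assumption that a process' ctx_id is unique and stable for its lifetime.

/*
 * Userspace model of the check added to switch_mm_irqs_off() above.
 * mm_ctx stands in for mm_struct, cpu_state for cpu_tlbstate, and
 * would_issue_ibpb() for the new IBPB condition; none of this is
 * kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mm_ctx {
        const char *name;
        uint64_t ctx_id;        /* unique for the lifetime of this mm */
        bool dumpable;          /* get_dumpable() == SUID_DUMP_USER */
        bool is_init_mm;        /* idle/kernel threads run on init_mm */
};

struct cpu_state {
        uint64_t last_ctx_id;   /* last user mm seen on this CPU */
};

/* Would the switch into 'next' trigger the predictor flush? */
static bool would_issue_ibpb(const struct cpu_state *cpu,
                             const struct mm_ctx *next)
{
        if (next->is_init_mm)                   /* kernel/idle thread */
                return false;
        if (next->ctx_id == cpu->last_ctx_id)   /* same user mm as before */
                return false;
        return !next->dumpable;                 /* flush only for non-dumpable */
}

/* Mirror the write of cpu_tlbstate.last_ctx_id in the second hunk. */
static void finish_switch(struct cpu_state *cpu, const struct mm_ctx *next)
{
        if (!next->is_init_mm)
                cpu->last_ctx_id = next->ctx_id;
}

int main(void)
{
        struct mm_ctx shell = { .name = "shell", .ctx_id = 2, .dumpable = true };
        struct mm_ctx gpg   = { .name = "gpg",   .ctx_id = 1, .dumpable = false };
        struct mm_ctx idle  = { .name = "idle",  .ctx_id = 0, .is_init_mm = true };
        struct cpu_state cpu = { .last_ctx_id = 0 };

        const struct mm_ctx *seq[] = { &shell, &gpg, &idle, &gpg, &shell };

        for (unsigned int i = 0; i < sizeof(seq) / sizeof(seq[0]); i++) {
                printf("switch to %-5s: IBPB %s\n", seq[i]->name,
                       would_issue_ibpb(&cpu, seq[i]) ? "issued" : "skipped");
                finish_switch(&cpu, seq[i]);
        }
        return 0;
}

Running the shell -> gpg -> idle -> gpg -> shell sequence reports the barrier issued only on the first switch into gpg; the return from idle finds the recorded ctx_id unchanged and skips it, which is exactly the "process A -> idle -> process A" case described in the commit message.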
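The condition keys off get_dumpable(), so only non-dumpable processes receive the barrier on switch-in. As a usage note, and not part of this patch, a process can put itself into that category explicitly with prctl(); setuid binaries and processes that drop privileges typically become non-dumpable automatically, which is presumably how tools in the gpg family end up covered.

/*
 * Illustrative only: mark the calling process non-dumpable so the
 * IBPB-on-switch-in check above applies to it.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) != 0) {
                perror("prctl(PR_SET_DUMPABLE)");
                return 1;
        }
        printf("dumpable is now %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
        return 0;
}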