diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-retpoline-Fill-return-stack-buffer-on-vmexit.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-retpoline-Fill-return-stack-buffer-on-vmexit.patch
new file mode 100644
index 00000000..40c846fc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-retpoline-Fill-return-stack-buffer-on-vmexit.patch
@@ -0,0 +1,195 @@
+From 718349fea3d22ecec829ef448f45f6eab4e5e2fa Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Fri, 12 Jan 2018 11:11:27 +0000
+Subject: [PATCH 075/103] x86/retpoline: Fill return stack buffer on vmexit
+
+commit 117cc7a908c83697b0b737d15ae1eb5943afe35b upstream.
+
+In accordance with the Intel and AMD documentation, we need to overwrite
+all entries in the RSB on exiting a guest, to prevent malicious branch
+target predictions from affecting the host kernel. This is needed both
+for retpoline and for IBRS.
+
+[ak: numbers again for the RSB stuffing labels]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515755487-8524-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 78 +++++++++++++++++++++++++++++++++++-
+ arch/x86/kvm/svm.c | 4 ++
+ arch/x86/kvm/vmx.c | 4 ++
+ 3 files changed, 85 insertions(+), 1 deletion(-)
+
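In outline, the fix is an ordering guarantee: on every vmexit path the RSB is overwritten before the first host 'ret' can consume a guest-planted entry. A minimal sketch of that shape in C, using hypothetical stand-in names rather than the patch's own code:

    /* Hypothetical stand-ins for illustration only; the real call
     * sites are svm_vcpu_run() and vmx_vcpu_run() in the hunks below. */
    static void sketch_enter_guest(void)
    {
            /* stands in for the VMRUN / VMLAUNCH asm block; returns on vmexit */
    }

    static void sketch_fill_rsb(void)
    {
            /* stands in for vmexit_fill_RSB(); overwrites all RSB entries */
    }

    static void sketch_vcpu_run(void)
    {
            sketch_enter_guest();
            sketch_fill_rsb();      /* before any host 'ret' executes */
            /* ... rest of the host-side exit path ... */
    }
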
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index ea034fa..402a11c 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -7,6 +7,48 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
+
++/*
++ * Fill the CPU return stack buffer.
++ *
++ * Each entry in the RSB, if used for a speculative 'ret', contains an
++ * infinite 'pause; jmp' loop to capture speculative execution.
++ *
++ * This is required in various cases for retpoline and IBRS-based
++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
++ * eliminate potentially bogus entries from the RSB, and sometimes
++ * purely to ensure that it doesn't get empty, which on some CPUs would
++ * allow predictions from other (unwanted!) sources to be used.
++ *
++ * We define a CPP macro such that it can be used from both .S files and
++ * inline assembly. It's possible to do a .macro and then include that
++ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
++ */
++
++#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
++#define RSB_FILL_LOOPS 16 /* To avoid underflow */
++
++/*
++ * Google experimented with loop-unrolling and this turned out to be
++ * the optimal version — two calls, each with their own speculation
++ * trap should their return address end up getting used, in a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr, sp) \
++ mov $(nr/2), reg; \
++771: \
++ call 772f; \
++773: /* speculation trap */ \
++ pause; \
++ jmp 773b; \
++772: \
++ call 774f; \
++775: /* speculation trap */ \
++ pause; \
++ jmp 775b; \
++774: \
++ dec reg; \
++ jnz 771b; \
++ add $(BITS_PER_LONG/8) * nr, sp;
++
+ #ifdef __ASSEMBLY__
+
+ /*
+@@ -76,6 +118,20 @@
+ #endif
+ .endm
+
++ /*
++ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
++ * monstrosity above, manually.
++ */
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
++#ifdef CONFIG_RETPOLINE
++ ANNOTATE_NOSPEC_ALTERNATIVE
++ ALTERNATIVE "jmp .Lskip_rsb_\@", \
++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
++ \ftr
++.Lskip_rsb_\@:
++#endif
++.endm
++
+ #else /* __ASSEMBLY__ */
+
+ #define ANNOTATE_NOSPEC_ALTERNATIVE \
+@@ -119,7 +175,7 @@
+ X86_FEATURE_RETPOLINE)
+
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+-#else /* No retpoline */
++#else /* No retpoline for C / inline asm */
+ # define CALL_NOSPEC "call *%[thunk_target]\n"
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #endif
+@@ -134,5 +190,25 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
++/*
++ * On VMEXIT we must ensure that no RSB predictions learned in the guest
++ * can be followed in the host, by overwriting the RSB completely. Both
++ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
++ * CPUs with IBRS_ATT *might* it be avoided.
++ */
++static inline void vmexit_fill_RSB(void)
++{
++#ifdef CONFIG_RETPOLINE
++ unsigned long loops = RSB_CLEAR_LOOPS / 2;
++
++ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
++ ALTERNATIVE("jmp 910f",
++ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
++ X86_FEATURE_RETPOLINE)
++ "910:"
++ : "=&r" (loops), ASM_CALL_CONSTRAINT
++ : "r" (loops) : "memory" );
++#endif
++}
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
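To make the control flow of __FILL_RETURN_BUFFER above concrete, here is a minimal user-space sketch of the same stuffing pattern, assuming x86-64 and GCC/Clang inline asm; the names are hypothetical and the kernel's ALTERNATIVE/cpufeature plumbing is omitted. Each 'call' pushes one return address whose speculative 'ret' target is a pause/jmp trap, two per loop iteration; the final 'add' discards the pushed addresses, so the function is architecturally a no-op:

    #include <stdio.h>

    #define SKETCH_RSB_CLEAR_LOOPS 32      /* mirrors RSB_CLEAR_LOOPS */

    static void sketch_fill_rsb(void)
    {
            unsigned long loops = SKETCH_RSB_CLEAR_LOOPS / 2;

            asm volatile("1:\n\t"
                         "call 2f\n"
                         "3:\tpause\n\t"             /* speculation trap */
                         "jmp 3b\n"
                         "2:\n\t"
                         "call 4f\n"
                         "5:\tpause\n\t"             /* speculation trap */
                         "jmp 5b\n"
                         "4:\n\t"
                         "dec %0\n\t"
                         "jnz 1b\n\t"
                         /* drop the 2 * loops return addresses pushed above */
                         "add %1, %%rsp\n\t"
                         : "+r" (loops)
                         : "i" (8 * SKETCH_RSB_CLEAR_LOOPS)
                         : "memory");
    }

    int main(void)
    {
            sketch_fill_rsb();
            puts("RSB stuffed; architecturally nothing changed");
            return 0;
    }

The pairing of two calls per iteration mirrors the unrolling note in the patch: it halves the loop count while still leaving every speculative return target pointing at a trap.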
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8ca1eca..975ea99 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -44,6 +44,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
++#include <asm/nospec-branch.h>
+
+ #include <asm/virtext.h>
+ #include "trace.h"
+@@ -4886,6 +4887,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ );
+
++ /* Eliminate branch target predictions from guest mode */
++ vmexit_fill_RSB();
++
+ #ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+ #else
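svm_vcpu_run() now stuffs the RSB immediately after the VMRUN asm block returns. Inside vmexit_fill_RSB(), the ALTERNATIVE() reduces to a one-time choice applied by boot-time code patching; below is a very rough C model of that behavior, with hypothetical names and an ordinary branch standing in for the in-place patching:

    #include <stdbool.h>

    /* Hypothetical stand-in for the X86_FEATURE_RETPOLINE cpufeature bit. */
    static bool sketch_have_retpoline;

    static void sketch_fill_rsb(void)
    {
            /* the stuffing loop sketched earlier */
    }

    static void sketch_vmexit_fill_rsb(void)
    {
            /*
             * ALTERNATIVE("jmp 910f", <stuffing sequence>,
             *             X86_FEATURE_RETPOLINE): without the feature,
             * the default "jmp 910f" skips the sequence entirely, so
             * this 'if' is in effect decided once at boot, not per call.
             */
            if (sketch_have_retpoline)
                    sketch_fill_rsb();
    }
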
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 69b8f8a..4ead27f 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -48,6 +48,7 @@
+ #include <asm/kexec.h>
+ #include <asm/apic.h>
+ #include <asm/irq_remapping.h>
++#include <asm/nospec-branch.h>
+
+ #include "trace.h"
+ #include "pmu.h"
+@@ -8989,6 +8990,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ );
+
++ /* Eliminate branch target predictions from guest mode */
++ vmexit_fill_RSB();
++
+ /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+ if (debugctlmsr)
+ update_debugctlmsr(debugctlmsr);
+--
+2.7.4
+