Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-retpoline-Fill-RSB-on-context-switch-for-affecte.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-retpoline-Fill-RSB-on-context-switch-for-affecte.patch  |  179
1 file changed, 179 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-retpoline-Fill-RSB-on-context-switch-for-affecte.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-retpoline-Fill-RSB-on-context-switch-for-affecte.patch
new file mode 100644
index 00000000..5f178944
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-retpoline-Fill-RSB-on-context-switch-for-affecte.patch
@@ -0,0 +1,179 @@
+From cd12f9191c530e3e52ad02f08bfd59fc0e5aeb65 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Fri, 12 Jan 2018 17:49:25 +0000
+Subject: [PATCH 079/103] x86/retpoline: Fill RSB on context switch for
+ affected CPUs
+
+commit c995efd5a740d9cbafbf58bde4973e8b50b4d761 upstream.
+
+On context switch from a shallow call stack to a deeper one, as the CPU
+does 'ret' up the deeper side it may encounter RSB entries (predictions for
+where the 'ret' goes to) which were populated in userspace.
+
+This is problematic if neither SMEP nor KPTI (the latter of which marks
+userspace pages as NX for the kernel) are active, as malicious code in
+userspace may then be executed speculatively.
+
+Overwrite the CPU's return prediction stack with calls which are predicted
+to return to an infinite loop, to "capture" speculation if this
+happens. This is required both for retpoline, and also in conjunction with
+IBRS for !SMEP && !KPTI.
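+
+For illustration only (the kernel's real implementation is the
+FILL_RETURN_BUFFER assembler macro invoked in the hunks below), the
+stuffing sequence boils down to a loop of 'call' instructions whose
+fall-through is never reached architecturally, so a speculative 'ret'
+that consumes one of the planted predictions lands in a pause/lfence
+capture loop. A minimal sketch in GCC inline asm, assuming x86-64 and
+no live red-zone data (the helper name and loop count are invented for
+the example):
+
+	static inline void rsb_stuff_sketch(void)
+	{
+		unsigned long loops = 16;	/* illustrative; cf. RSB_CLEAR_LOOPS */
+
+		asm volatile(
+			"1:	call 2f\n\t"		/* plant one RSB prediction */
+			"3:	pause\n\t"		/* speculation trap: a 'ret' */
+			"	lfence\n\t"		/* consuming the entry spins */
+			"	jmp 3b\n\t"		/* here, going nowhere */
+			"2:	dec %0\n\t"
+			"	jnz 1b\n\t"
+			"	add $(8*16), %%rsp\n\t"	/* drop the 16 return addresses */
+			: "+r" (loops) : : "memory");
+	}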
+
+On Skylake+ the problem is slightly different, and an *underflow* of the
+RSB may cause errant branch predictions to occur. So there it's not so much
+overwrite, as *filling* the RSB to attempt to prevent it getting
+empty. This is only a partial solution for Skylake+ since there are many
+other conditions which may result in the RSB becoming empty. The full
+solution on Skylake+ is to use IBRS, which will prevent the problem even
+when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
+required on context switch.
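+
+The "Skylake+" class is identified by family-6 model number, as done by
+the is_skylake_era() helper added below. As a stand-alone user-space
+restatement (illustrative only, not part of the patch: the hex models
+are the values behind the INTEL_FAM6_* constants in asm/intel-family.h,
+and the vendor check the kernel performs is reduced to a comment):
+
+	#include <cpuid.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int eax, ebx, ecx, edx, family, model;
+
+		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+			return 1;
+		/* The kernel additionally checks vendor == Intel. */
+		family = (eax >> 8) & 0xf;
+		model = (eax >> 4) & 0xf;
+		if (family == 6)
+			model |= (eax >> 12) & 0xf0;	/* fold in extended model */
+		printf("family %u, model 0x%02x: ", family, model);
+		if (family == 6) {
+			switch (model) {
+			case 0x4e:	/* INTEL_FAM6_SKYLAKE_MOBILE */
+			case 0x5e:	/* INTEL_FAM6_SKYLAKE_DESKTOP */
+			case 0x55:	/* INTEL_FAM6_SKYLAKE_X */
+			case 0x8e:	/* INTEL_FAM6_KABYLAKE_MOBILE */
+			case 0x9e:	/* INTEL_FAM6_KABYLAKE_DESKTOP */
+				puts("Skylake-era (RSB underflow concern)");
+				return 0;
+			}
+		}
+		puts("not in the patch's Skylake-era list");
+		return 0;
+	}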
+
+[ tglx: Added missing vendor check and slightly massaged comments and
+ changelog ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_32.S | 11 +++++++++++
+ arch/x86/entry/entry_64.S | 11 +++++++++++
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++++++++++++++++++++
+ 4 files changed, 59 insertions(+)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index bdc9aea..a76dc73 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -229,6 +229,17 @@ ENTRY(__switch_to_asm)
+ movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+ #endif
+
++#ifdef CONFIG_RETPOLINE
++ /*
++ * When switching from a shallower to a deeper call stack
++ * the RSB may either underflow or use entries populated
++ * with userspace addresses. On CPUs where those concerns
++ * exist, overwrite the RSB with entries which capture
++ * speculative execution to prevent attack.
++ */
++ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+ /* restore callee-saved registers */
+ popl %esi
+ popl %edi
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index f7ebaa1..eff47f5 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -427,6 +427,17 @@ ENTRY(__switch_to_asm)
+ movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ #endif
+
++#ifdef CONFIG_RETPOLINE
++ /*
++ * When switching from a shallower to a deeper call stack
++ * the RSB may either underflow or use entries populated
++ * with userspace addresses. On CPUs where those concerns
++ * exist, overwrite the RSB with entries which capture
++ * speculative execution to prevent attack.
++ */
++ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+ /* restore callee-saved registers */
+ popq %r15
+ popq %r14
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 4467568..2f60cb5 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -200,6 +200,7 @@
+ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+ #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+ #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 49d25dd..8cacf62 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -22,6 +22,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
++#include <asm/intel-family.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+
+@@ -154,6 +155,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ return SPECTRE_V2_CMD_NONE;
+ }
+
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++ boot_cpu_data.x86 == 6) {
++ switch (boot_cpu_data.x86_model) {
++ case INTEL_FAM6_SKYLAKE_MOBILE:
++ case INTEL_FAM6_SKYLAKE_DESKTOP:
++ case INTEL_FAM6_SKYLAKE_X:
++ case INTEL_FAM6_KABYLAKE_MOBILE:
++ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ return true;
++ }
++ }
++ return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -212,6 +230,24 @@ static void __init spectre_v2_select_mitigation(void)
+
+ spectre_v2_enabled = mode;
+ pr_info("%s\n", spectre_v2_strings[mode]);
++
++ /*
+ * If neither SMEP nor KPTI is available, there is a risk of
+ * hitting userspace addresses in the RSB after a context switch
+ * from a shallow call stack to a deeper one. To prevent this, fill
++ * the entire RSB, even when using IBRS.
++ *
++ * Skylake era CPUs have a separate issue with *underflow* of the
++ * RSB, when they will predict 'ret' targets from the generic BTB.
++ * The proper mitigation for this is IBRS. If IBRS is not supported
++ * or deactivated in favour of retpolines the RSB fill on context
++ * switch is required.
++ */
++ if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
++ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++ pr_info("Filling RSB on context switch\n");
++ }
+ }
+
+ #undef pr_fmt
+--
+2.7.4
+