Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-spectre_v1-Disable-compiler-optimizations-over-a.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-spectre_v1-Disable-compiler-optimizations-over-a.patch | 84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-spectre_v1-Disable-compiler-optimizations-over-a.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-spectre_v1-Disable-compiler-optimizations-over-a.patch
new file mode 100644
index 00000000..a8632983
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-spectre_v1-Disable-compiler-optimizations-over-a.patch
@@ -0,0 +1,84 @@
+From d98751217028054a791c98512d1ed81d406f55da Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 7 Jun 2018 09:13:48 -0700
+Subject: [PATCH 06/10] x86/spectre_v1: Disable compiler optimizations over
+ array_index_mask_nospec()
+
+commit eab6870fee877258122a042bfd99ee7908c40280 upstream.
+
+Mark Rutland noticed that GCC optimization passes have the potential to elide
+necessary invocations of the array_index_mask_nospec() instruction sequence,
+so mark the asm() volatile.
+
+Mark explains:
+
+"The volatile will inhibit *some* cases where the compiler could lift the
+ array_index_nospec() call out of a branch, e.g. where there are multiple
+ invocations of array_index_nospec() with the same arguments:
+
+ if (idx < foo) {
+ idx1 = array_idx_nospec(idx, foo)
+ do_something(idx1);
+ }
+
+ < some other code >
+
+ if (idx < foo) {
+ idx2 = array_idx_nospec(idx, foo);
+ do_something_else(idx2);
+ }
+
+ ... since the compiler can determine that the two invocations yield the same
+ result, and reuse the first result (likely the same register as idx was in
+ originally) for the second branch, effectively re-writing the above as:
+
+ if (idx < foo) {
+ idx = array_idx_nospec(idx, foo);
+ do_something(idx);
+ }
+
+ < some other code >
+
+ if (idx < foo) {
+ do_something_else(idx);
+ }
+
+ ... if we don't take the first branch, then speculatively take the second, we
+ lose the nospec protection.
+
+ There's more info on volatile asm in the GCC docs:
+
+ https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#Volatile
+ "
+
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Fixes: babdde2698d4 ("x86: Implement array_index_mask_nospec")
+Link: https://lkml.kernel.org/lkml/152838798950.14521.4893346294059739135.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/barrier.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 78d1c6a..eb53c2c 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -37,7 +37,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ {
+ unsigned long mask;
+
+- asm ("cmp %1,%2; sbb %0,%0;"
++ asm volatile ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+ :"g"(size),"r" (index)
+ :"cc");
+--
+2.7.4
+
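For readers without the kernel tree at hand, below is a minimal stand-alone sketch of the construct the hunk above changes. It is illustrative only: mask_nospec and the demo harness are hypothetical names, not kernel API, and the inline asm assumes x86/x86-64 built with a GCC-compatible compiler.

#include <stdio.h>

/*
 * User-space re-creation of the kernel's array_index_mask_nospec()
 * idea, for illustration only. Returns ~0UL when index < size and
 * 0UL otherwise, without a conditional branch the CPU could
 * mispredict.
 */
static inline unsigned long mask_nospec(unsigned long index,
                                        unsigned long size)
{
	unsigned long mask;

	/*
	 * AT&T syntax: "cmp %1,%2" computes index - size and sets the
	 * carry flag iff index < size (unsigned borrow); "sbb %0,%0"
	 * then yields mask = mask - mask - CF, i.e. all ones when the
	 * index is in bounds and zero otherwise. The volatile is what
	 * the patch above adds.
	 */
	asm volatile ("cmp %1,%2; sbb %0,%0;"
		      : "=r" (mask)
		      : "g" (size), "r" (index)
		      : "cc");
	return mask;
}

int main(void)
{
	unsigned long size = 16;

	/* In bounds: mask is ~0UL, so (index & mask) == index. */
	printf("mask_nospec(3, 16)  = %#lx\n", mask_nospec(3, size));
	/* Out of bounds: mask is 0, so (index & mask) clamps to 0. */
	printf("mask_nospec(42, 16) = %#lx\n", mask_nospec(42, size));
	return 0;
}

Without the volatile qualifier, two calls with identical arguments look like a pure common subexpression to the optimizer, which is exactly the reuse Mark Rutland describes in the commit message: the second branch gets the already-computed value instead of a freshly masked one. Marking the asm volatile tells the compiler the statement has effects it cannot see, which inhibits (though, as the quoted text notes, does not universally prevent) lifting or merging the sequence.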