Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-retpoline-entry-Convert-entry-assembler-indirect.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-retpoline-entry-Convert-entry-assembler-indirect.patch | 122
1 file changed, 122 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-retpoline-entry-Convert-entry-assembler-indirect.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-retpoline-entry-Convert-entry-assembler-indirect.patch
new file mode 100644
index 00000000..db94d3d8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-retpoline-entry-Convert-entry-assembler-indirect.patch
@@ -0,0 +1,122 @@
+From a2073819181d22ec2197b919f5f5d3a7305dd5c0 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:28 +0000
+Subject: [PATCH 069/103] x86/retpoline/entry: Convert entry assembler indirect
+ jumps
+
+commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.
+
+Convert indirect jumps in core 32/64bit entry assembler code to use
+non-speculative sequences when CONFIG_RETPOLINE is enabled.
+
+Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
+address after the 'call' instruction must be *precisely* at the
+.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
+and the use of alternatives will mess that up unless we play horrid
+games to prepend with NOPs and make the variants the same length. It's
+not worth it; in the case where we ALTERNATIVE out the retpoline, the
+first instruction at __x86.indirect_thunk.rax is going to be a bare
+jmp *%rax anyway.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_32.S | 5 +++--
+ arch/x86/entry/entry_64.S | 10 ++++++++--
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index edba860..7b95f35 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -45,6 +45,7 @@
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+ #include <asm/export.h>
++#include <asm/nospec-branch.h>
+
+ .section .entry.text, "ax"
+
+@@ -260,7 +261,7 @@ ENTRY(ret_from_fork)
+
+ /* kernel thread */
+ 1: movl %edi, %eax
+- call *%ebx
++ CALL_NOSPEC %ebx
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+@@ -1062,7 +1063,7 @@ error_code:
+ movl %ecx, %es
+ TRACE_IRQS_OFF
+ movl %esp, %eax # pt_regs pointer
+- call *%edi
++ CALL_NOSPEC %edi
+ jmp ret_from_exception
+ END(page_fault)
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 5bb9b02..f7ebaa1 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -37,6 +37,7 @@
+ #include <asm/pgtable_types.h>
+ #include <asm/export.h>
+ #include <asm/kaiser.h>
++#include <asm/nospec-branch.h>
+ #include <linux/err.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+@@ -208,7 +209,12 @@ entry_SYSCALL_64_fastpath:
+ * It might end up jumping to the slow path. If it jumps, RAX
+ * and all argument registers are clobbered.
+ */
++#ifdef CONFIG_RETPOLINE
++ movq sys_call_table(, %rax, 8), %rax
++ call __x86_indirect_thunk_rax
++#else
+ call *sys_call_table(, %rax, 8)
++#endif
+ .Lentry_SYSCALL_64_after_fastpath_call:
+
+ movq %rax, RAX(%rsp)
+@@ -380,7 +386,7 @@ ENTRY(stub_ptregs_64)
+ jmp entry_SYSCALL64_slow_path
+
+ 1:
+- jmp *%rax /* Called from C */
++ JMP_NOSPEC %rax /* Called from C */
+ END(stub_ptregs_64)
+
+ .macro ptregs_stub func
+@@ -457,7 +463,7 @@ ENTRY(ret_from_fork)
+ 1:
+ /* kernel thread */
+ movq %r12, %rdi
+- call *%rbx
++ CALL_NOSPEC %rbx
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+--
+2.7.4
+
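
For reference, the CALL_NOSPEC and JMP_NOSPEC macros used in the hunks above come from <asm/nospec-branch.h>, introduced earlier in this series. The sketch below is a minimal illustration of the retpoline sequence such a macro expands to for "jmp *%rax" when CONFIG_RETPOLINE is enabled; the label names are illustrative, not the kernel's generated ones.

	/*
	 * Minimal sketch of a retpoline replacing "jmp *%rax",
	 * assuming the RETPOLINE_JMP construction from this
	 * series' <asm/nospec-branch.h>; labels are illustrative.
	 */
	call	.Ldo_rop	/* pushes &.Lspec_trap, jumps below  */
.Lspec_trap:
	pause			/* any speculation that "returns"    */
	lfence			/* here just spins until squashed    */
	jmp	.Lspec_trap
.Ldo_rop:
	mov	%rax, (%rsp)	/* replace the pushed return address */
	ret			/* architecturally lands at *%rax;   */
				/* the RSB predicts .Lspec_trap, so  */
				/* the indirect branch predictor is  */
				/* never consulted for the target    */

The trick is that the RET's prediction comes from the return stack buffer, which the preceding CALL primed with the address of the capture loop; the attacker-trainable indirect branch predictor never sees the jump.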
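The entry_SYSCALL_64_fastpath hunk deserves a closer look. ALTERNATIVE pads the shorter variant with NOPs so both variants occupy the same number of bytes, which means a CALL inside the shorter variant would push a return address landing in the padding rather than precisely at .Lentry_SYSCALL_64_after_fastpath_call, breaking the return-address comparison in stub_ptregs_64. The annotated sketch below restates the hunk's approach, which sidesteps alternatives entirely by making the CALL the last instruction before the label in both configurations (comments are the editor's gloss, not part of the patch):

#ifdef CONFIG_RETPOLINE
	/*
	 * Load the target out of the syscall table first, then make a
	 * direct call to the rax thunk; the pushed return address is
	 * exactly the label below.
	 */
	movq	sys_call_table(, %rax, 8), %rax
	call	__x86_indirect_thunk_rax
#else
	/* Plain indirect call; same property, return address is the label. */
	call	*sys_call_table(, %rax, 8)
#endif
.Lentry_SYSCALL_64_after_fastpath_call:

As the commit message notes, this costs little even when retpolines are patched out at boot: in that case the first instruction of the thunk is a bare "jmp *%rax", so the direct-call-to-thunk form adds at most one extra jump.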