Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-get_user-Use-pointer-masking-to-limit-speculatio.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-get_user-Use-pointer-masking-to-limit-speculatio.patch  100
1 file changed, 100 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-get_user-Use-pointer-masking-to-limit-speculatio.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-get_user-Use-pointer-masking-to-limit-speculatio.patch
new file mode 100644
index 00000000..c58bff80
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-get_user-Use-pointer-masking-to-limit-speculatio.patch
@@ -0,0 +1,100 @@
+From aa9e88541e4443ffd498e0dd1912b2e658a659e6 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 29 Jan 2018 17:02:54 -0800
+Subject: [PATCH 31/42] x86/get_user: Use pointer masking to limit speculation
+
+(cherry picked from commit c7f631cb07e7da06ac1d231ca178452339e32a94)
+
+Quoting Linus:
+
+ I do think that it would be a good idea to very expressly document
+ the fact that it's not that the user access itself is unsafe. I do
+ agree that things like "get_user()" want to be protected, but not
+ because of any direct bugs or problems with get_user() and friends,
+ but simply because get_user() is an excellent source of a pointer
+ that is obviously controlled from a potentially attacking user
+ space. So it's a prime candidate for then finding _subsequent_
+ accesses that can then be used to perturb the cache.
+
+Unlike the __get_user() case get_user() includes the address limit check
+near the pointer de-reference. With that locality the speculation can be
+mitigated with pointer narrowing rather than a barrier, i.e.
+array_index_nospec(). Where the narrowing is performed by:
+
+ cmp %limit, %ptr
+ sbb %mask, %mask
+ and %mask, %ptr
+
+With respect to speculation the value of %ptr is either less than %limit
+or NULL.
+
+Co-developed-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kernel-hardening@lists.openwall.com
+Cc: gregkh@linuxfoundation.org
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: torvalds@linux-foundation.org
+Cc: alan@linux.intel.com
+Link: https://lkml.kernel.org/r/151727417469.33451.11804043010080838495.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/getuser.S | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index 37b62d4..b12b214 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -39,6 +39,8 @@ ENTRY(__get_user_1)
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
++ and %_ASM_DX, %_ASM_AX
+ ASM_STAC
+ 1: movzbl (%_ASM_AX),%edx
+ xor %eax,%eax
+@@ -53,6 +55,8 @@ ENTRY(__get_user_2)
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
++ and %_ASM_DX, %_ASM_AX
+ ASM_STAC
+ 2: movzwl -1(%_ASM_AX),%edx
+ xor %eax,%eax
+@@ -67,6 +71,8 @@ ENTRY(__get_user_4)
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
++ and %_ASM_DX, %_ASM_AX
+ ASM_STAC
+ 3: movl -3(%_ASM_AX),%edx
+ xor %eax,%eax
+@@ -82,6 +88,8 @@ ENTRY(__get_user_8)
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
++ and %_ASM_DX, %_ASM_AX
+ ASM_STAC
+ 4: movq -7(%_ASM_AX),%rdx
+ xor %eax,%eax
+@@ -93,6 +101,8 @@ ENTRY(__get_user_8)
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user_8
++ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
++ and %_ASM_DX, %_ASM_AX
+ ASM_STAC
+ 4: movl -7(%_ASM_AX),%edx
+ 5: movl -3(%_ASM_AX),%ecx
+--
+2.7.4
+
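
For readers unfamiliar with the cmp/sbb/and idiom quoted in the commit message, the stand-alone C sketch below (a hypothetical illustration, not part of the patch) expresses the same pointer-narrowing logic: when the pointer is below the limit the mask is all ones and the pointer passes through unchanged; otherwise the mask is zero and the pointer collapses to NULL, so a mispredicted limit check cannot forward an attacker-chosen address to the subsequent load. The kernel uses hand-written assembly rather than C here because a compiler is free to turn the comparison back into a conditional branch, which would defeat the mitigation.

    #include <stdint.h>

    /* Hypothetical user-space model of the sbb/and masking added to getuser.S. */
    static inline uintptr_t mask_user_pointer(uintptr_t ptr, uintptr_t limit)
    {
            /*
             * All-ones if ptr < limit, zero otherwise.  The intent is a
             * branch-free mask, although a C compiler may still emit a
             * branch, which is why the kernel does this in assembly.
             */
            uintptr_t mask = (uintptr_t)0 - (uintptr_t)(ptr < limit);

            /*
             * Result is either the original pointer or NULL, matching
             * "the value of %ptr is either less than %limit or NULL"
             * in the commit message.
             */
            return ptr & mask;
    }

A caller would dereference only the masked value, mirroring how each patched __get_user_* entry point applies the mask to %_ASM_AX after the limit check and before ASM_STAC and the actual load.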