Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch  121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch
new file mode 100644
index 00000000..3ce54c27
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch
@@ -0,0 +1,121 @@
+From 6bcf9c90963dc4402738e2875e446770d283b08c Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 13 Aug 2018 12:19:45 +0530
+Subject: [PATCH 43/95] x86/mm: Add Secure Encrypted Virtualization (SEV)
+ support
+
+From d8aa7eea78a1401cce39b3bb61ead0150044a3df
+
+Provide support for Secure Encrypted Virtualization (SEV). This initial
+support defines a flag that is used by the kernel to determine if it is
+running with SEV active.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: https://lkml.kernel.org/r/20171020143059.3291-3-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/mem_encrypt.h | 6 ++++++
+ arch/x86/mm/mem_encrypt.c | 26 ++++++++++++++++++++++++++
+ include/linux/mem_encrypt.h | 7 +++++--
+ 3 files changed, 37 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index e7d96c0..ad91ab5 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -47,6 +47,9 @@ void __init mem_encrypt_init(void);
+
+ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
+
++bool sme_active(void);
++bool sev_active(void);
++
+ #else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+ #define sme_me_mask 0ULL
+@@ -64,6 +67,9 @@ static inline void __init sme_early_init(void) { }
+ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+ static inline void __init sme_enable(struct boot_params *bp) { }
+
++static inline bool sme_active(void) { return false; }
++static inline bool sev_active(void) { return false; }
++
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+ /*
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 48c03c7..4e4a304 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -42,6 +42,8 @@ static char sme_cmdline_off[] __initdata = "off";
+ u64 sme_me_mask __section(.data) = 0;
+ EXPORT_SYMBOL(sme_me_mask);
+
++static bool sev_enabled __section(.data);
++
+ /* Buffer used for early in-place encryption by BSP, no locking needed */
+ static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+@@ -192,6 +194,30 @@ void __init sme_early_init(void)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+ }
+
++/*
++ * SME and SEV are very similar but they are not the same, so there are
++ * times that the kernel will need to distinguish between SME and SEV. The
++ * sme_active() and sev_active() functions are used for this. When a
++ * distinction isn't needed, the mem_encrypt_active() function can be used.
++ *
++ * The trampoline code is a good example for this requirement. Before
++ * paging is activated, SME will access all memory as decrypted, but SEV
++ * will access all memory as encrypted. So, when APs are being brought
++ * up under SME the trampoline area cannot be encrypted, whereas under SEV
++ * the trampoline area must be encrypted.
++ */
++bool sme_active(void)
++{
++ return sme_me_mask && !sev_enabled;
++}
++EXPORT_SYMBOL_GPL(sme_active);
++
++bool sev_active(void)
++{
++ return sme_me_mask && sev_enabled;
++}
++EXPORT_SYMBOL_GPL(sev_active);
++
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_init(void)
+ {
+diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
+index 265a9cd..b310a9c 100644
+--- a/include/linux/mem_encrypt.h
++++ b/include/linux/mem_encrypt.h
+@@ -23,11 +23,14 @@
+
+ #define sme_me_mask 0ULL
+
++static inline bool sme_active(void) { return false; }
++static inline bool sev_active(void) { return false; }
++
+ #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+-static inline bool sme_active(void)
++static inline bool mem_encrypt_active(void)
+ {
+- return !!sme_me_mask;
++ return sme_me_mask;
+ }
+
+ static inline u64 sme_get_me_mask(void)
+--
+2.7.4
+
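The three predicates this patch ends up providing -- sme_active(), sev_active(), and mem_encrypt_active() -- all derive from one pair of state variables: sme_me_mask (nonzero whenever any memory encryption is in use) and sev_enabled (set only when running as an SEV guest). Below is a minimal, self-contained C sketch of that logic; the function bodies mirror the hunks above, but the main() harness, the printed messages, and the example C-bit position are hypothetical illustrations, not kernel code.

/*
 * Illustrative model of the predicates added by this patch.
 * The predicate bodies mirror arch/x86/mm/mem_encrypt.c above;
 * the harness and the mask value are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sme_me_mask;  /* nonzero => memory encryption in use */
static bool sev_enabled;      /* true only when running as an SEV guest */

static bool sme_active(void) { return sme_me_mask && !sev_enabled; }
static bool sev_active(void) { return sme_me_mask && sev_enabled; }
static bool mem_encrypt_active(void) { return sme_me_mask != 0; }

int main(void)
{
	/* Model an SEV guest: encryption mask present, SEV flag set. */
	sme_me_mask = 1ULL << 47;  /* example C-bit position, hypothetical */
	sev_enabled = true;

	/*
	 * The trampoline case from the comment in the patch: under SME
	 * the trampoline area must be decrypted before APs are brought
	 * up, whereas under SEV it must stay encrypted.
	 */
	if (sme_active())
		puts("SME: mark the trampoline area decrypted");
	else if (sev_active())
		puts("SEV: leave the trampoline area encrypted");

	/* When the SME/SEV distinction does not matter, one test suffices. */
	if (mem_encrypt_active())
		puts("some form of memory encryption is active");

	return 0;
}

Run as written, this selects the SEV branch; flipping sev_enabled to false selects the SME branch instead. That divergence under an identical sme_me_mask is the reason the patch keeps two separate predicates even though both test the same mask.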