path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch  188
1 file changed, 188 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch
new file mode 100644
index 00000000..a9ca3a12
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch
@@ -0,0 +1,188 @@
+From 194c3226782279235d34de040e318d30665b7b5b Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 22 Oct 2018 13:28:34 +0530
+Subject: [PATCH 59/95] x86/CPU/AMD: Add the Secure Encrypted Virtualization
+ CPU feature
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Update the CPU features to include identifying and reporting on the
+Secure Encrypted Virtualization (SEV) feature. SEV is identified by
+CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of
+MSR_K8_SYSCFG and set bit 0 of MSR_K7_HWCR). Only show the SEV feature
+as available if reported by CPUID and enabled by BIOS.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ arch/x86/include/asm/msr-index.h | 2 +
+ arch/x86/kernel/cpu/amd.c | 82 +++++++++++++++++++++-----------------
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 4 files changed, 50 insertions(+), 37 deletions(-)
+ mode change 100644 => 100755 arch/x86/include/asm/cpufeatures.h
+ mode change 100644 => 100755 arch/x86/include/asm/msr-index.h
+ mode change 100644 => 100755 arch/x86/kernel/cpu/amd.c
+ mode change 100644 => 100755 arch/x86/kernel/cpu/scattered.c
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+old mode 100644
+new mode 100755
+index 8418462..76a0ba0
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -210,7 +210,7 @@
+ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+-
++#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+old mode 100644
+new mode 100755
+index 42b18cc..55d802c
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -413,6 +413,8 @@
+ #define MSR_K7_PERFCTR3 0xc0010007
+ #define MSR_K7_CLK_CTL 0xc001001b
+ #define MSR_K7_HWCR 0xc0010015
++#define MSR_K7_HWCR_SMMLOCK_BIT 0
++#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+ #define MSR_K7_FID_VID_CTL 0xc0010041
+ #define MSR_K7_FID_VID_STATUS 0xc0010042
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+old mode 100644
+new mode 100755
+index dda741b..c6333e7
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -575,6 +575,51 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
++{
++ u64 msr;
++
++ /*
++ * BIOS support is required for SME and SEV.
++ * For SME: If BIOS has enabled SME then adjust x86_phys_bits by
++ * the SME physical address space reduction value.
++ * If BIOS has not enabled SME then don't advertise the
++ * SME feature (set in scattered.c).
++ * For SEV: If BIOS has not enabled SEV then don't advertise the
++ * SEV feature (set in scattered.c).
++ *
++ * In all cases, since support for SME and SEV requires long mode,
++ * don't advertise the feature under CONFIG_X86_32.
++ */
++ if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
++ /* Check if memory encryption is enabled */
++ rdmsrl(MSR_K8_SYSCFG, msr);
++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++ goto clear_all;
++
++ /*
++ * Always adjust physical address bits. Even though this
++ * will be a value above 32-bits this is still done for
++ * CONFIG_X86_32 so that accurate values are reported.
++ */
++ c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
++
++ if (IS_ENABLED(CONFIG_X86_32))
++ goto clear_all;
++
++ rdmsrl(MSR_K7_HWCR, msr);
++ if (!(msr & MSR_K7_HWCR_SMMLOCK))
++ goto clear_sev;
++
++ return;
++
++clear_all:
++ clear_cpu_cap(c, X86_FEATURE_SME);
++clear_sev:
++ clear_cpu_cap(c, X86_FEATURE_SEV);
++ }
++}
++
+ static void early_init_amd(struct cpuinfo_x86 *c)
+ {
+ u64 value;
+@@ -647,42 +692,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
+ set_cpu_bug(c, X86_BUG_AMD_E400);
+
+- /*
+- * BIOS support is required for SME. If BIOS has enabled SME then
+- * adjust x86_phys_bits by the SME physical address space reduction
+- * value. If BIOS has not enabled SME then don't advertise the
+- * feature (set in scattered.c). Also, since the SME support requires
+- * long mode, don't advertise the feature under CONFIG_X86_32.
+- */
+- if (cpu_has(c, X86_FEATURE_SME)) {
+- u64 msr;
+-
+- /* Check if SME is enabled */
+- rdmsrl(MSR_K8_SYSCFG, msr);
+- if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+- c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+- if (IS_ENABLED(CONFIG_X86_32))
+- clear_cpu_cap(c, X86_FEATURE_SME);
+- } else {
+- clear_cpu_cap(c, X86_FEATURE_SME);
+- }
+- }
+-
+- /* Re-enable TopologyExtensions if switched off by BIOS */
+- if (c->x86 == 0x15 &&
+- (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
+- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+-
+- if (msr_set_bit(0xc0011005, 54) > 0) {
+- rdmsrl(0xc0011005, value);
+- if (value & BIT_64(54)) {
+- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+- }
+- }
+- }
+-
+- amd_get_topology_early(c);
++ early_detect_mem_encrypt(c);
+ }
+
+ static void init_amd_k8(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+old mode 100644
+new mode 100755
+index df11f5d..3320773
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -29,6 +29,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
++ { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
+--
+2.7.4
+
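
For readers who want to probe the CPUID side of this check from user space, here is a minimal sketch (not part of the patch; it assumes a GCC or Clang toolchain that provides <cpuid.h>). It mirrors only the CPUID portion of early_detect_mem_encrypt(): the BIOS-enablement checks on MSR_K8_SYSCFG bit 23 and MSR_K7_HWCR bit 0 are MSR reads that need kernel mode (or root access through the msr driver), so they are left out.

/*
 * Illustrative user-space sketch (not part of the patch): query CPUID
 * leaf 0x8000001f the same way the kernel code above does.  Assumes a
 * GCC/Clang toolchain providing <cpuid.h>.  The BIOS-enablement checks
 * (MSR_K8_SYSCFG bit 23, MSR_K7_HWCR bit 0) require MSR reads and are
 * therefore omitted here; only CPUID-reported support is shown.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x8000001f: AMD memory encryption capabilities. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not available");
		return 1;
	}

	printf("SME reported by CPUID: %s\n", (eax & (1u << 0)) ? "yes" : "no");
	printf("SEV reported by CPUID: %s\n", (eax & (1u << 1)) ? "yes" : "no");
	/* Same EBX field the patch uses to adjust x86_phys_bits. */
	printf("Physical address bit reduction: %u\n", (ebx >> 6) & 0x3f);

	return 0;
}

If CPUID reports SEV but the BIOS checks fail, the patched kernel clears X86_FEATURE_SEV via clear_cpu_cap(), so the feature will not be advertised by the kernel even though this sketch prints "yes".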