Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch  241
1 file changed, 241 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
new file mode 100644
index 00000000..de5ae0c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
@@ -0,0 +1,241 @@
+From 708128a64a6b750b63a5f1ca1e943c48023145b9 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 10 May 2018 22:06:39 +0200
+Subject: [PATCH 77/93] KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
+
+commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream
+
+Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
+speculative store bypass disable (SSBD) under SVM. This will allow guests
+to use SSBD on hardware that uses non-architectural mechanisms for enabling
+SSBD.
+
+[ tglx: Folded the migration fixup from Paolo Bonzini ]
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 3 ++-
+ arch/x86/kvm/cpuid.c | 11 +++++++++--
+ arch/x86/kvm/cpuid.h | 9 +++++++++
+ arch/x86/kvm/svm.c | 21 +++++++++++++++++++--
+ arch/x86/kvm/vmx.c | 18 +++++++++++++++---
+ arch/x86/kvm/x86.c | 13 ++++---------
+ 7 files changed, 59 insertions(+), 18 deletions(-)
+
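+As a guest-side illustration of what this enables (a minimal sketch under
+the assumption that the usual <asm/msr.h> accessors are available; it is
+not part of the patch): once CPUID advertises VIRT_SSBD, toggling SSBD is
+a plain MSR write that KVM intercepts and emulates, regardless of how the
+host itself implements SSBD.
+
+	#include <asm/msr-index.h>
+	#include <asm/msr.h>
+
+	static void guest_toggle_ssbd(bool enable)
+	{
+		/* MSR 0xc001011f; only bit 2 (SSBD) may be set, any other
+		 * bit makes the emulation below return 1, i.e. inject #GP */
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0);
+	}
+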
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 6f6ee68..fd3a854 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -864,7 +864,7 @@ struct kvm_x86_ops {
+ int (*hardware_setup)(void); /* __init */
+ void (*hardware_unsetup)(void); /* __exit */
+ bool (*cpu_has_accelerated_tpr)(void);
+- bool (*cpu_has_high_real_mode_segbase)(void);
++ bool (*has_emulated_msr)(int index);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
+
+ int (*vm_init)(struct kvm *kvm);
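+
+The hook rename above is the core refactor: instead of a one-off predicate
+about big real mode, each vendor module now answers "can you emulate MSR
+<index> for a guest?". A hypothetical vendor stub showing the contract
+(SVM's actual implementation further down simply returns true for every
+index):
+
+	static bool myvendor_has_emulated_msr(int index)
+	{
+		switch (index) {
+		case MSR_AMD64_VIRT_SPEC_CTRL:
+			/* only claim the MSR when it can be emulated */
+			return boot_cpu_has(X86_FEATURE_VIRT_SSBD) ||
+			       boot_cpu_has(X86_FEATURE_LS_CFG_SSBD);
+		default:
+			return true;
+		}
+	}
+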
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 945e841..40fc748 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,7 +735,8 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
++ cpu_has(c, X86_FEATURE_VIRT_SSBD))
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
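+
+With this change a guest that sees only VIRT_SSBD still gets the synthetic
+X86_FEATURE_SSBD flag, so the generic mitigation code works unmodified;
+the per-task update path then picks whichever MSR is actually available.
+A condensed sketch of that dispatch (simplified from the 4.9 speculation
+code, not part of this hunk):
+
+	static void ssbd_msr_update(unsigned long tifn)
+	{
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+			wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL,
+			       ssbd_tif_to_spec_ctrl(tifn));
+		else
+			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base |
+			       ssbd_tif_to_spec_ctrl(tifn));
+	}
+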
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 4ccdfbe..4d3269b 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->edx = 0;
+- /* IBRS and IBPB aren't necessarily present in hardware cpuid */
++ /*
++ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
++ * hardware cpuid
++ */
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ break;
+ }
+ case 0x80000019:
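+
+Note the ordering in the hunk above: the LS_CFG_SSBD case ORs in
+F(VIRT_SSBD) after the feature mask and cpuid_mask(), so the bit reaches
+the guest even though the host's own CPUID never advertises it. From
+inside a guest the result shows up as leaf 0x80000008 EBX bit 25; a
+hypothetical userspace probe using GCC's <cpuid.h> wrapper:
+
+	#include <cpuid.h>
+
+	static int guest_has_virt_ssbd(void)
+	{
+		unsigned int eax, ebx, ecx, edx;
+
+		if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
+			return 0;
+		return !!(ebx & (1u << 25));	/* VIRT_SSBD */
+	}
+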
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 410070c..d22695c 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -182,6 +182,15 @@ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+ }
+
++static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
++{
++ struct kvm_cpuid_entry2 *best;
++
++ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
++ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
++}
++
++
+
+ /*
+ * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
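+
+The new accessor mirrors the existing guest_cpuid_has_*() helpers: it
+consults the guest's (not the host's) view of CPUID leaf 0x80000008 and
+tests the feature bit. Its callers are the MSR handlers added to svm.c
+below, where a failed check makes KVM inject #GP:
+
+	if (!msr_info->host_initiated && !guest_cpuid_has_virt_ssbd(vcpu))
+		return 1;	/* nonzero return => #GP into the guest */
+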
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 481b106..c60d8fc 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3552,6 +3552,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+
+ msr_info->data = svm->spec_ctrl;
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr_info->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ msr_info->data = svm->virt_spec_ctrl;
++ break;
+ case MSR_IA32_UCODE_REV:
+ msr_info->data = 0x01000065;
+ break;
+@@ -3679,6 +3686,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ if (data & ~SPEC_CTRL_SSBD)
++ return 1;
++
++ svm->virt_spec_ctrl = data;
++ break;
+ case MSR_STAR:
+ svm->vmcb->save.star = data;
+ break;
+@@ -5138,7 +5155,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
+ return false;
+ }
+
+-static bool svm_has_high_real_mode_segbase(void)
++static bool svm_has_emulated_msr(int index)
+ {
+ return true;
+ }
+@@ -5455,7 +5472,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .hardware_enable = svm_hardware_enable,
+ .hardware_disable = svm_hardware_disable,
+ .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
++ .has_emulated_msr = svm_has_emulated_msr,
+
+ .vcpu_create = svm_create_vcpu,
+ .vcpu_free = svm_free_vcpu,
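+
+Note the msr->host_initiated escape hatch in the handlers above: this is
+the migration fixup folded in from Paolo Bonzini, which lets userspace
+(e.g. QEMU) save and restore the MSR even when the guest's CPUID does not
+advertise VIRT_SSBD. The cached svm->virt_spec_ctrl value is applied
+around VMRUN by x86_virt_spec_ctrl() from the preceding patch in this
+series, roughly (sketch, not part of this hunk):
+
+	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, true);
+	/* ... VMRUN, guest runs ... */
+	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, false);
+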
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 55af4b6..7b4739c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8673,9 +8673,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+ }
+ }
+
+-static bool vmx_has_high_real_mode_segbase(void)
++static bool vmx_has_emulated_msr(int index)
+ {
+- return enable_unrestricted_guest || emulate_invalid_guest_state;
++ switch (index) {
++ case MSR_IA32_SMBASE:
++ /*
++ * We cannot do SMM unless we can run the guest in big
++ * real mode.
++ */
++ return enable_unrestricted_guest || emulate_invalid_guest_state;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ /* This is AMD only. */
++ return false;
++ default:
++ return true;
++ }
+ }
+
+ static bool vmx_mpx_supported(void)
+@@ -11304,7 +11316,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .hardware_enable = hardware_enable,
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
++ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
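+
+On Intel the switch keeps the old SMBASE answer intact and declines the
+AMD-only MSR, which drops MSR_AMD64_VIRT_SPEC_CTRL from the combined list
+KVM reports to userspace. A hypothetical userspace probe of that list
+(assumes an already-open /dev/kvm descriptor kvm_fd):
+
+	#include <linux/kvm.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <sys/ioctl.h>
+
+	static void probe_virt_spec_ctrl(int kvm_fd)
+	{
+		struct kvm_msr_list *list;
+		unsigned int i;
+
+		list = malloc(sizeof(*list) + 256 * sizeof(__u32));
+		list->nmsrs = 256;
+		if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) == 0)
+			for (i = 0; i < list->nmsrs; i++)
+				if (list->indices[i] == 0xc001011f)
+					puts("VIRT_SPEC_CTRL is emulated");
+		free(list);
+	}
+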
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b27b93d..c531231 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = {
+ MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
+ MSR_IA32_SMBASE,
++ MSR_AMD64_VIRT_SPEC_CTRL,
+ };
+
+ static unsigned num_emulated_msrs;
+@@ -2650,7 +2651,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ * fringe case that is not enabled except via specific settings
+ * of the module parameters.
+ */
+- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
++ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
+ break;
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+@@ -4201,14 +4202,8 @@ static void kvm_init_msr_list(void)
+ num_msrs_to_save = j;
+
+ for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+- switch (emulated_msrs[i]) {
+- case MSR_IA32_SMBASE:
+- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+- continue;
+- break;
+- default:
+- break;
+- }
++ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
++ continue;
+
+ if (j < i)
+ emulated_msrs[j] = emulated_msrs[i];
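+
+The open-coded switch collapses into a single callback query, and the
+loop becomes a generic in-place filter: surviving entries are compacted
+toward the front and num_emulated_msrs is trimmed to j afterwards. The
+same idiom in isolation (illustrative only, kernel-style types):
+
+	static unsigned int filter_in_place(u32 *arr, unsigned int n,
+					    bool (*keep)(u32))
+	{
+		unsigned int i, j;
+
+		for (i = j = 0; i < n; i++)
+			if (keep(arr[i]))
+				arr[j++] = arr[i];
+		return j;	/* new length */
+	}
+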
+--
+2.7.4
+