Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0018-KVM-VMX-introduce-alloc_loaded_vmcs.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.9.21/0018-KVM-VMX-introduce-alloc_loaded_vmcs.patch | 104
1 file changed, 104 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-KVM-VMX-introduce-alloc_loaded_vmcs.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-KVM-VMX-introduce-alloc_loaded_vmcs.patch
new file mode 100644
index 00000000..a22f91a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-KVM-VMX-introduce-alloc_loaded_vmcs.patch
@@ -0,0 +1,104 @@
+From 80f4f0e9de9cce1047ac0aac305aca7310e37313 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 11 Jan 2018 12:16:15 +0100
+Subject: [PATCH 18/33] KVM: VMX: introduce alloc_loaded_vmcs
+
+(cherry picked from commit f21f165ef922c2146cc5bdc620f542953c41714b)
+
+Group together the calls to alloc_vmcs and loaded_vmcs_init. Soon we'll also
+allocate an MSR bitmap there.
+
+Cc: stable@vger.kernel.org # prereq for Spectre mitigation
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 38 +++++++++++++++++++++++---------------
+ 1 file changed, 23 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 099f221..6814355 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3514,11 +3514,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
+ return vmcs;
+ }
+
+-static struct vmcs *alloc_vmcs(void)
+-{
+- return alloc_vmcs_cpu(raw_smp_processor_id());
+-}
+-
+ static void free_vmcs(struct vmcs *vmcs)
+ {
+ free_pages((unsigned long)vmcs, vmcs_config.order);
+@@ -3537,6 +3532,22 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+ WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
+ }
+
++static struct vmcs *alloc_vmcs(void)
++{
++ return alloc_vmcs_cpu(raw_smp_processor_id());
++}
++
++static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
++{
++ loaded_vmcs->vmcs = alloc_vmcs();
++ if (!loaded_vmcs->vmcs)
++ return -ENOMEM;
++
++ loaded_vmcs->shadow_vmcs = NULL;
++ loaded_vmcs_init(loaded_vmcs);
++ return 0;
++}
++
+ static void free_kvm_area(void)
+ {
+ int cpu;
+@@ -6916,6 +6927,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ struct vmcs *shadow_vmcs;
+ const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
+ | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
++ int r;
+
+ /* The Intel VMX Instruction Reference lists a bunch of bits that
+ * are prerequisite to running VMXON, most notably cr4.VMXE must be
+@@ -6955,11 +6967,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ return 1;
+ }
+
+- vmx->nested.vmcs02.vmcs = alloc_vmcs();
+- vmx->nested.vmcs02.shadow_vmcs = NULL;
+- if (!vmx->nested.vmcs02.vmcs)
++ r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
++ if (r < 0)
+ goto out_vmcs02;
+- loaded_vmcs_init(&vmx->nested.vmcs02);
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ vmx->nested.msr_bitmap =
+@@ -9090,17 +9100,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+ if (!vmx->guest_msrs)
+ goto free_pml;
+
+- vmx->loaded_vmcs = &vmx->vmcs01;
+- vmx->loaded_vmcs->vmcs = alloc_vmcs();
+- vmx->loaded_vmcs->shadow_vmcs = NULL;
+- if (!vmx->loaded_vmcs->vmcs)
+- goto free_msrs;
+ if (!vmm_exclusive)
+ kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
+- loaded_vmcs_init(vmx->loaded_vmcs);
++ err = alloc_loaded_vmcs(&vmx->vmcs01);
+ if (!vmm_exclusive)
+ kvm_cpu_vmxoff();
++ if (err < 0)
++ goto free_msrs;
+
++ vmx->loaded_vmcs = &vmx->vmcs01;
+ cpu = get_cpu();
+ vmx_vcpu_load(&vmx->vcpu, cpu);
+ vmx->vcpu.cpu = cpu;
+--
+2.7.4
+
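For illustration only, a minimal user-space sketch of the pattern this patch introduces in arch/x86/kvm/vmx.c: grouping allocation and initialization behind a single helper that either hands back a fully set-up structure or fails with -ENOMEM. All names here (my_vmcs, my_loaded_vmcs, alloc_my_loaded_vmcs) are hypothetical stand-ins, not the kernel's types.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the kernel's struct vmcs (a page-sized control structure). */
    struct my_vmcs { char data[4096]; };

    /* Stand-in for struct loaded_vmcs: a VMCS plus per-use bookkeeping. */
    struct my_loaded_vmcs {
            struct my_vmcs *vmcs;
            struct my_vmcs *shadow_vmcs;
            int launched;
    };

    static struct my_vmcs *alloc_my_vmcs(void)
    {
            return calloc(1, sizeof(struct my_vmcs));
    }

    static void my_loaded_vmcs_init(struct my_loaded_vmcs *lv)
    {
            memset(lv->vmcs->data, 0, sizeof(lv->vmcs->data));
            lv->launched = 0;
    }

    /*
     * Analogue of alloc_loaded_vmcs(): callers no longer allocate and then
     * separately initialize; they get an all-or-nothing helper, which is
     * also the natural place to add further allocations (e.g. an MSR bitmap)
     * later, as the commit message notes.
     */
    static int alloc_my_loaded_vmcs(struct my_loaded_vmcs *lv)
    {
            lv->vmcs = alloc_my_vmcs();
            if (!lv->vmcs)
                    return -ENOMEM;

            lv->shadow_vmcs = NULL;
            my_loaded_vmcs_init(lv);
            return 0;
    }

    int main(void)
    {
            struct my_loaded_vmcs lv;

            if (alloc_my_loaded_vmcs(&lv) < 0)
                    return 1;

            free(lv.vmcs);
            return 0;
    }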