Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-mark-vmcs12-pages-dirty-on-L2-exit.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-mark-vmcs12-pages-dirty-on-L2-exit.patch  119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-mark-vmcs12-pages-dirty-on-L2-exit.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-mark-vmcs12-pages-dirty-on-L2-exit.patch
new file mode 100644
index 00000000..e7f44b1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-mark-vmcs12-pages-dirty-on-L2-exit.patch
@@ -0,0 +1,119 @@
+From 50fefe1aabf115927dbe944d4607d3696ed2773e Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Tue, 1 Aug 2017 14:00:40 -0700
+Subject: [PATCH 16/33] KVM: nVMX: mark vmcs12 pages dirty on L2 exit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+(cherry picked from commit c9f04407f2e0b3fc9ff7913c65fcfcb0a4b61570)
+
+The host physical addresses of L1's Virtual APIC Page and Posted
+Interrupt descriptor are loaded into the VMCS02. The CPU may write
+to these pages via their host physical address while L2 is running,
+bypassing address-translation-based dirty tracking (e.g. EPT write
+protection). Mark them dirty on every exit from L2 to prevent them
+from getting out of sync with dirty tracking.
+
+Also mark the virtual APIC page and the posted interrupt descriptor
+dirty when KVM is virtualizing posted interrupt processing.
+
+Signed-off-by: David Matlack <dmatlack@google.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 43 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 13dc454..2e88fd1 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4722,6 +4722,28 @@ static bool vmx_get_enable_apicv(void)
+ return enable_apicv;
+ }
+
++static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
++{
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++ gfn_t gfn;
++
++ /*
++ * Don't need to mark the APIC access page dirty; it is never
++ * written to by the CPU during APIC virtualization.
++ */
++
++ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
++ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
++ kvm_vcpu_mark_page_dirty(vcpu, gfn);
++ }
++
++ if (nested_cpu_has_posted_intr(vmcs12)) {
++ gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
++ kvm_vcpu_mark_page_dirty(vcpu, gfn);
++ }
++}
++
++
+ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -4729,18 +4751,15 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ void *vapic_page;
+ u16 status;
+
+- if (vmx->nested.pi_desc &&
+- vmx->nested.pi_pending) {
+- vmx->nested.pi_pending = false;
+- if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+- return;
+-
+- max_irr = find_last_bit(
+- (unsigned long *)vmx->nested.pi_desc->pir, 256);
++ if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
++ return;
+
+- if (max_irr == 256)
+- return;
++ vmx->nested.pi_pending = false;
++ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
++ return;
+
++ max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
++ if (max_irr != 256) {
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
+ kunmap(vmx->nested.virtual_apic_page);
+@@ -4752,6 +4771,8 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+ }
++
++ nested_mark_vmcs12_pages_dirty(vcpu);
+ }
+
+ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+@@ -8009,6 +8030,18 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+ KVM_ISA_VMX);
+
++ /*
++ * The host physical addresses of some pages of guest memory
++ * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU
++ * may write to these pages via their host physical address while
++ * L2 is running, bypassing any address-translation-based dirty
++ * tracking (e.g. EPT write protection).
++ *
++ * Mark them dirty on every exit from L2 to prevent them from
++ * getting out of sync with dirty tracking.
++ */
++ nested_mark_vmcs12_pages_dirty(vcpu);
++
+ if (vmx->nested.nested_run_pending)
+ return false;
+
+--
+2.7.4
+
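Standalone illustration (not part of the patch above): the fix boils down to converting a guest-physical address from vmcs12 into a guest frame number (gpa >> PAGE_SHIFT) and setting the corresponding bit in the memslot's dirty bitmap via kvm_vcpu_mark_page_dirty(). The C sketch below models that idea outside of KVM; the memslot layout and the mark_page_dirty() helper are simplified stand-ins, not the kernel's actual definitions — only the shift mirrors the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12              /* 4 KiB pages, as on x86 */
typedef uint64_t gpa_t;            /* guest-physical address */
typedef uint64_t gfn_t;            /* guest frame number */

/* Simplified stand-in for a KVM memory slot: a base gfn, a page count,
 * and one dirty bit per page. Not the kernel's real structure. */
struct memslot {
	gfn_t base_gfn;
	uint64_t npages;
	unsigned long dirty_bitmap[64];   /* room for 64 * BITS_PER_LONG pages */
};

/* Simplified model of kvm_vcpu_mark_page_dirty(): translate the gfn to a
 * slot-relative index and set the matching bit in the dirty bitmap. */
static void mark_page_dirty(struct memslot *slot, gfn_t gfn)
{
	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return;                        /* gfn not backed by this slot */
	uint64_t rel = gfn - slot->base_gfn;
	slot->dirty_bitmap[rel / (8 * sizeof(unsigned long))] |=
		1UL << (rel % (8 * sizeof(unsigned long)));
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100, .npages = 512 };
	memset(slot.dirty_bitmap, 0, sizeof(slot.dirty_bitmap));

	/* Stand-in for vmcs12->virtual_apic_page_addr: a guest-physical
	 * address whose page the CPU may have written behind EPT's back. */
	gpa_t virtual_apic_page_addr = 0x123000;
	gfn_t gfn = virtual_apic_page_addr >> PAGE_SHIFT;  /* same shift as the patch */

	mark_page_dirty(&slot, gfn);
	printf("gfn 0x%llx marked dirty\n", (unsigned long long)gfn);
	return 0;
}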