Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.9.21/0020-KVM-x86-Add-IBPB-support.patch')
 common/recipes-kernel/linux/linux-yocto-4.9.21/0020-KVM-x86-Add-IBPB-support.patch | 352 -
 1 file changed, 0 insertions(+), 352 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-KVM-x86-Add-IBPB-support.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-KVM-x86-Add-IBPB-support.patch
deleted file mode 100644
index 731a182a..00000000
--- a/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-KVM-x86-Add-IBPB-support.patch
+++ /dev/null
@@ -1,352 +0,0 @@
-From b70d7889c078c97d11ae6412760f3231fda324cd Mon Sep 17 00:00:00 2001
-From: Ashok Raj <ashok.raj@intel.com>
-Date: Thu, 1 Feb 2018 22:59:43 +0100
-Subject: [PATCH 20/33] KVM/x86: Add IBPB support
-
-(cherry picked from commit 15d45071523d89b3fb7372e2135fbd72f6af9506)
-
-The Indirect Branch Predictor Barrier (IBPB) is an indirect branch
-control mechanism. It keeps earlier branches from influencing
-later ones.
-
-Unlike IBRS and STIBP, IBPB does not define a new mode of operation.
-It's a command that ensures predicted branch targets aren't used after
-the barrier. Although IBRS and IBPB are enumerated together in CPUID,
-IBPB is very different: it is a one-shot command rather than a
-persistent operating mode.
-
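-To make the "command, not a mode" distinction concrete: issuing the
-barrier amounts to a single write of the IBPB command bit to the
-write-only PRED_CMD MSR. A minimal sketch (the helper name matches the
-one this patch calls; the real kernel helper is additionally gated on
-feature flags):
-
-	/* Flush indirect branch predictions on this CPU. */
-	static inline void indirect_branch_prediction_barrier(void)
-	{
-		/* PRED_CMD is write-only; there is nothing to read back. */
-		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-	}
-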
-IBPB helps mitigate three potential attacks:
-
-* Mitigate guest-to-guest attacks.
- - This is addressed by issuing an IBPB when we do a guest switch
-   (see the sketch after this list).
-
-* Mitigate attacks from guest/ring3->host/ring3.
- These would require an IBPB during a context switch in the host, or
- after VMEXIT. The host process has two ways to mitigate:
- - It can be compiled with retpoline.
- - If it goes through a context switch and has set !dumpable, then
- there is an IBPB in that path.
- (Tim's patch: https://patchwork.kernel.org/patch/10192871)
- - The case where you return to Qemu after a VMEXIT can leave Qemu
- attackable from the guest when Qemu isn't compiled with retpoline.
- Issuing an IBPB on every VMEXIT has been reported to cause TSC
- calibration problems in the guest.
-
-* Mitigate guest/ring0->host/ring0 attacks.
- When the host kernel is using retpoline it is safe against these
- attacks. If the host kernel isn't using retpoline we might need to do
- an IBPB flush on every VMEXIT.
-
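-For the guest-to-guest case above, issuing the barrier on every
-VM-entry would be prohibitively expensive, so the patch only flushes
-when a physical CPU switches to a different vCPU's control block. A
-condensed sketch of the SVM side, taken from the svm_vcpu_load() hunk
-below (VMX keys off the loaded VMCS instead):
-
-	/* Another guest's vmcb was active on this CPU: its branch
-	 * predictions could influence us, so flush them now. */
-	if (sd->current_vmcb != svm->vmcb) {
-		sd->current_vmcb = svm->vmcb;
-		indirect_branch_prediction_barrier();
-	}
-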
-Even when using retpoline for indirect calls, under certain conditions
-'ret' can fall back to the BTB on Skylake-era CPUs. Other mitigations,
-such as RSB stuffing/clearing, are available for that case.
-
-* IBPB is issued only for SVM during svm_free_vcpu().
- VMX has a vmclear and SVM doesn't. See the discussion here:
- https://lkml.org/lkml/2018/1/15/146
-
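-The barrier in svm_free_vcpu() exists because the freed vmcb page can
-be recycled for a new vCPU at the same address, so the current_vmcb
-comparison in svm_vcpu_load() would match and wrongly skip the flush.
-Condensed from the svm_free_vcpu() hunk below:
-
-	kmem_cache_free(kvm_vcpu_cache, svm);
-	/*
-	 * The vmcb page can be recycled, causing a false negative in
-	 * svm_vcpu_load(). So do a full IBPB now.
-	 */
-	indirect_branch_prediction_barrier();
-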
-Please refer to the following documentation for more details on the
-enumeration and control of these features, and on the available
-mitigations:
-
-https://software.intel.com/en-us/side-channel-security-support
-
-[peterz: rebase and changelog rewrite]
-[karahmed: - rebase
- - vmx: expose PRED_CMD if guest has it in CPUID
- - svm: only pass through IBPB if guest has it in CPUID
- - vmx: support !cpu_has_vmx_msr_bitmap()
- - vmx: support nested]
-[dwmw2: Expose CPUID bit too (AMD IBPB only for now as we lack IBRS)
- PRED_CMD is a write-only MSR]
-
-Signed-off-by: Ashok Raj <ashok.raj@intel.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: kvm@vger.kernel.org
-Cc: Asit Mallick <asit.k.mallick@intel.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
-Cc: Greg KH <gregkh@linuxfoundation.org>
-Cc: Jun Nakajima <jun.nakajima@intel.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: Dan Williams <dan.j.williams@intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Link: http://lkml.kernel.org/r/1515720739-43819-6-git-send-email-ashok.raj@intel.com
-Link: https://lkml.kernel.org/r/1517522386-18410-3-git-send-email-karahmed@amazon.de
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/x86/kvm/cpuid.c | 11 +++++++-
- arch/x86/kvm/cpuid.h | 12 ++++++++
- arch/x86/kvm/svm.c | 28 +++++++++++++++++++
- arch/x86/kvm/vmx.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++--
- 4 files changed, 127 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
-index afa7bbb..42323be 100644
---- a/arch/x86/kvm/cpuid.c
-+++ b/arch/x86/kvm/cpuid.c
-@@ -355,6 +355,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
- 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
-
-+ /* cpuid 0x80000008.ebx */
-+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-+ F(IBPB);
-+
- /* cpuid 0xC0000001.edx */
- const u32 kvm_cpuid_C000_0001_edx_x86_features =
- F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
-@@ -607,7 +611,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- if (!g_phys_as)
- g_phys_as = phys_as;
- entry->eax = g_phys_as | (virt_as << 8);
-- entry->ebx = entry->edx = 0;
-+ entry->edx = 0;
-+ /* IBPB isn't necessarily present in hardware cpuid */
-+ if (boot_cpu_has(X86_FEATURE_IBPB))
-+ entry->ebx |= F(IBPB);
-+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
-+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
- break;
- }
- case 0x80000019:
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
-index 35058c2..f4a2a1a 100644
---- a/arch/x86/kvm/cpuid.h
-+++ b/arch/x86/kvm/cpuid.h
-@@ -152,6 +152,18 @@ static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
- return best && (best->edx & bit(X86_FEATURE_RDTSCP));
- }
-
-+static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
-+{
-+ struct kvm_cpuid_entry2 *best;
-+
-+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-+ if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
-+ return true;
-+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
-+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
-+}
-+
-+
- /*
- * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
- */
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 491f077..43e45b9 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -248,6 +248,7 @@ static const struct svm_direct_access_msrs {
- { .index = MSR_CSTAR, .always = true },
- { .index = MSR_SYSCALL_MASK, .always = true },
- #endif
-+ { .index = MSR_IA32_PRED_CMD, .always = false },
- { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
- { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
- { .index = MSR_IA32_LASTINTFROMIP, .always = false },
-@@ -510,6 +511,7 @@ struct svm_cpu_data {
- struct kvm_ldttss_desc *tss_desc;
-
- struct page *save_area;
-+ struct vmcb *current_vmcb;
- };
-
- static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
-@@ -1641,11 +1643,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
- __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, svm);
-+ /*
-+ * The vmcb page can be recycled, causing a false negative in
-+ * svm_vcpu_load(). So do a full IBPB now.
-+ */
-+ indirect_branch_prediction_barrier();
- }
-
- static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
-+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
- int i;
-
- if (unlikely(cpu != vcpu->cpu)) {
-@@ -1674,6 +1682,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- if (static_cpu_has(X86_FEATURE_RDTSCP))
- wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
-
-+ if (sd->current_vmcb != svm->vmcb) {
-+ sd->current_vmcb = svm->vmcb;
-+ indirect_branch_prediction_barrier();
-+ }
- avic_vcpu_load(vcpu, cpu);
- }
-
-@@ -3587,6 +3599,22 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
- case MSR_IA32_TSC:
- kvm_write_tsc(vcpu, msr);
- break;
-+ case MSR_IA32_PRED_CMD:
-+ if (!msr->host_initiated &&
-+ !guest_cpuid_has_ibpb(vcpu))
-+ return 1;
-+
-+ if (data & ~PRED_CMD_IBPB)
-+ return 1;
-+
-+ if (!data)
-+ break;
-+
-+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-+ if (is_guest_mode(vcpu))
-+ break;
-+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
-+ break;
- case MSR_STAR:
- svm->vmcb->save.star = data;
- break;
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index c6a7563..855df75 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -550,6 +550,7 @@ struct vcpu_vmx {
- u64 msr_host_kernel_gs_base;
- u64 msr_guest_kernel_gs_base;
- #endif
-+
- u32 vm_entry_controls_shadow;
- u32 vm_exit_controls_shadow;
- /*
-@@ -911,6 +912,8 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
- static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
- static int alloc_identity_pagetable(struct kvm *kvm);
- static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
-+ u32 msr, int type);
-
- static DEFINE_PER_CPU(struct vmcs *, vmxarea);
- static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-@@ -1841,6 +1844,29 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
- vmcs_write32(EXCEPTION_BITMAP, eb);
- }
-
-+/*
-+ * Check if MSR is intercepted for L01 MSR bitmap.
-+ */
-+static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
-+{
-+ unsigned long *msr_bitmap;
-+ int f = sizeof(unsigned long);
-+
-+ if (!cpu_has_vmx_msr_bitmap())
-+ return true;
-+
-+ msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
-+
-+ if (msr <= 0x1fff) {
-+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
-+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-+ msr &= 0x1fff;
-+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
-+ }
-+
-+ return true;
-+}
-+
- static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
- unsigned long entry, unsigned long exit)
- {
-@@ -2252,6 +2278,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
- per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
- vmcs_load(vmx->loaded_vmcs->vmcs);
-+ indirect_branch_prediction_barrier();
- }
-
- if (!already_loaded) {
-@@ -3048,6 +3075,33 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- case MSR_IA32_TSC:
- kvm_write_tsc(vcpu, msr_info);
- break;
-+ case MSR_IA32_PRED_CMD:
-+ if (!msr_info->host_initiated &&
-+ !guest_cpuid_has_ibpb(vcpu))
-+ return 1;
-+
-+ if (data & ~PRED_CMD_IBPB)
-+ return 1;
-+
-+ if (!data)
-+ break;
-+
-+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-+
-+ /*
-+ * For non-nested:
-+ * When it's written (to non-zero) for the first time, pass
-+ * it through.
-+ *
-+ * For nested:
-+ * The handling of the MSR bitmap for L2 guests is done in
-+ * nested_vmx_merge_msr_bitmap. We should not touch the
-+ * vmcs02.msr_bitmap here since it gets completely overwritten
-+ * in the merging.
-+ */
-+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
-+ MSR_TYPE_W);
-+ break;
- case MSR_IA32_CR_PAT:
- if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-@@ -9406,9 +9460,23 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
- struct page *page;
- unsigned long *msr_bitmap_l1;
- unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
-+ /*
-+ * pred_cmd is trying to verify two things:
-+ *
-+ * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
-+ * ensures that we do not accidentally generate an L02 MSR bitmap
-+ * from the L12 MSR bitmap that is too permissive.
-+ * 2. That L1 or L2s have actually used the MSR. This avoids
-+ * unnecessarily merging of the bitmap if the MSR is unused. This
-+ * works properly because we only update the L01 MSR bitmap lazily.
-+ * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
-+ * updated to reflect this when L1 (or its L2s) actually write to
-+ * the MSR.
-+ */
-+ bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-
-- /* This shortcut is ok because we support only x2APIC MSRs so far. */
-- if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
-+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
-+ !pred_cmd)
- return false;
-
- page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-@@ -9443,6 +9511,13 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
- MSR_TYPE_W);
- }
- }
-+
-+ if (pred_cmd)
-+ nested_vmx_disable_intercept_for_msr(
-+ msr_bitmap_l1, msr_bitmap_l0,
-+ MSR_IA32_PRED_CMD,
-+ MSR_TYPE_W);
-+
- kunmap(page);
- nested_release_page_clean(page);
-
---
-2.7.4
-