Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch | 231
1 file changed, 231 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch
new file mode 100644
index 00000000..0f0cdf18
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch
@@ -0,0 +1,231 @@
+From c8d6a6308c0d23867fa09ab68bf79dc06f4e92f5 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:39:29 +0530
+Subject: [PATCH 81/95] KVM: SVM: Pin guest memory when SEV is active
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From upstream commit 1e80fdc09d121d8327cdf62eefbb5abadddca792
+
+The SEV memory encryption engine uses a tweak such that two identical
+plaintext pages at different locations will have different ciphertext.
+So swapping or moving the ciphertext of two pages will not result in
+the plaintext being swapped. Relocating (or migrating) the physical
+backing pages of an SEV guest therefore requires additional steps. The
+current SEV key management spec does not provide commands to swap or
+migrate (move) ciphertext pages. For now, we pin the guest memory
+registered through the KVM_MEMORY_ENCRYPT_REG_REGION ioctl.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/svm.c | 132 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 133 insertions(+)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8493c469c..13894c0 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -758,6 +758,7 @@ struct kvm_sev_info {
+ unsigned int handle; /* SEV firmware handle */
+ int fd; /* SEV device fd */
+ unsigned long pages_locked; /* Number of pages locked */
++ struct list_head regions_list; /* List of registered regions */
+ };
+
+ struct kvm_arch {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 83a4dfe..2dcfd9f 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -349,6 +349,14 @@ static unsigned int min_sev_asid;
+ static unsigned long *sev_asid_bitmap;
+ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
++struct enc_region {
++ struct list_head list;
++ unsigned long npages;
++ struct page **pages;
++ unsigned long uaddr;
++ unsigned long size;
++};
++
+ static inline bool svm_sev_enabled(void)
+ {
+ return max_sev_asid;
+@@ -1688,13 +1696,46 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+ }
+ }
+
++static void __unregister_enc_region_locked(struct kvm *kvm,
++ struct enc_region *region)
++{
++ /*
++ * The guest may change the memory encryption attribute from C=0 -> C=1
++ * or vice versa for this memory range. Let's make sure caches are
++ * flushed to ensure that guest data gets written into memory with
++ * the correct C-bit.
++ */
++ sev_clflush_pages(region->pages, region->npages);
++
++ sev_unpin_memory(kvm, region->pages, region->npages);
++ list_del(&region->list);
++ kfree(region);
++}
++
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct list_head *head = &sev->regions_list;
++ struct list_head *pos, *q;
+
+ if (!sev_guest(kvm))
+ return;
+
++ mutex_lock(&kvm->lock);
++
++ /*
++ * If userspace was terminated before unregistering the memory regions,
++ * then let's unpin all the registered memory.
++ */
++ if (!list_empty(head)) {
++ list_for_each_safe(pos, q, head) {
++ __unregister_enc_region_locked(kvm,
++ list_entry(pos, struct enc_region, list));
++ }
++ }
++
++ mutex_unlock(&kvm->lock);
++
+ sev_unbind_asid(kvm, sev->handle);
+ sev_asid_free(kvm);
+ }
+@@ -5920,6 +5961,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ sev->active = true;
+ sev->asid = asid;
++ INIT_LIST_HEAD(&sev->regions_list);
+
+ return 0;
+
+@@ -6622,6 +6664,94 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ return r;
+ }
+
++static int svm_register_enc_region(struct kvm *kvm,
++ struct kvm_enc_region *range)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct enc_region *region;
++ int ret = 0;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ region = kzalloc(sizeof(*region), GFP_KERNEL);
++ if (!region)
++ return -ENOMEM;
++
++ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
++ if (!region->pages) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ /*
++ * The guest may change the memory encryption attribute from C=0 -> C=1
++ * or vice versa for this memory range. Let's make sure caches are
++ * flushed to ensure that guest data gets written into memory with
++ * the correct C-bit.
++ */
++ sev_clflush_pages(region->pages, region->npages);
++
++ region->uaddr = range->addr;
++ region->size = range->size;
++
++ mutex_lock(&kvm->lock);
++ list_add_tail(&region->list, &sev->regions_list);
++ mutex_unlock(&kvm->lock);
++
++ return ret;
++
++e_free:
++ kfree(region);
++ return ret;
++}
++
++static struct enc_region *
++find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct list_head *head = &sev->regions_list;
++ struct enc_region *i;
++
++ list_for_each_entry(i, head, list) {
++ if (i->uaddr == range->addr &&
++ i->size == range->size)
++ return i;
++ }
++
++ return NULL;
++}
++
++
++static int svm_unregister_enc_region(struct kvm *kvm,
++ struct kvm_enc_region *range)
++{
++ struct enc_region *region;
++ int ret;
++
++ mutex_lock(&kvm->lock);
++
++ if (!sev_guest(kvm)) {
++ ret = -ENOTTY;
++ goto failed;
++ }
++
++ region = find_enc_region(kvm, range);
++ if (!region) {
++ ret = -EINVAL;
++ goto failed;
++ }
++
++ __unregister_enc_region_locked(kvm, region);
++
++ mutex_unlock(&kvm->lock);
++ return 0;
++
++failed:
++ mutex_unlock(&kvm->lock);
++ return ret;
++}
++
+ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+@@ -6734,6 +6864,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+ .mem_enc_op = svm_mem_enc_op,
++ .mem_enc_reg_region = svm_register_enc_region,
++ .mem_enc_unreg_region = svm_unregister_enc_region,
+ };
+
+ static int __init svm_init(void)
+--
+2.7.4
+