aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch467
1 files changed, 467 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch
new file mode 100644
index 00000000..ca2a6d03
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch
@@ -0,0 +1,467 @@
+From f9db61de70bc09e29e9fe6da88ad5becd37c8aa4 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 15 Aug 2018 12:47:42 +0530
+Subject: [PATCH 92/95] KVM: SVM: add struct kvm_svm to hold SVM specific KVM
+ vars
+
+From 81811c162d4da1ececef14a1efc9602e86d29ef5
+
+Add struct kvm_svm, which is analogous to struct vcpu_svm, along with
+a helper to_kvm_svm() to retrieve kvm_svm from a struct kvm *. Move
+the SVM specific variables and struct definitions out of kvm_arch
+and into kvm_svm.
+
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 18 ------
+ arch/x86/kvm/svm.c | 134 +++++++++++++++++++++++-----------------
+ 2 files changed, 79 insertions(+), 73 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 0b2bcd2..ed9b0da 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -752,15 +752,6 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
+-struct kvm_sev_info {
+- bool active; /* SEV enabled guest */
+- unsigned int asid; /* ASID used for this guest */
+- unsigned int handle; /* SEV firmware handle */
+- int fd; /* SEV device fd */
+- unsigned long pages_locked; /* Number of pages locked */
+- struct list_head regions_list; /* List of registered regions */
+-};
+-
+ struct kvm_arch {
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+@@ -839,17 +830,8 @@ struct kvm_arch {
+
+ bool disabled_lapic_found;
+
+- /* Struct members for AVIC */
+- u32 avic_vm_id;
+- u32 ldr_mode;
+- struct page *avic_logical_id_table_page;
+- struct page *avic_physical_id_table_page;
+- struct hlist_node hnode;
+-
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
+-
+- struct kvm_sev_info sev_info;
+ };
+
+ struct kvm_vm_stat {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index e6c4353..9264eed 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -132,6 +132,28 @@ static const u32 host_save_user_msrs[] = {
+
+ #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+
++struct kvm_sev_info {
++ bool active; /* SEV enabled guest */
++ unsigned int asid; /* ASID used for this guest */
++ unsigned int handle; /* SEV firmware handle */
++ int fd; /* SEV device fd */
++ unsigned long pages_locked; /* Number of pages locked */
++ struct list_head regions_list; /* List of registered regions */
++};
++
++struct kvm_svm {
++ struct kvm kvm;
++
++ /* Struct members for AVIC */
++ u32 avic_vm_id;
++ u32 ldr_mode;
++ struct page *avic_logical_id_table_page;
++ struct page *avic_physical_id_table_page;
++ struct hlist_node hnode;
++
++ struct kvm_sev_info sev_info;
++};
++
+ struct kvm_vcpu;
+
+ struct nested_state {
+@@ -359,6 +381,12 @@ struct enc_region {
+ unsigned long size;
+ };
+
++
++static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
++{
++ return container_of(kvm, struct kvm_svm, kvm);
++}
++
+ static inline bool svm_sev_enabled(void)
+ {
+ return max_sev_asid;
+@@ -366,14 +394,14 @@ static inline bool svm_sev_enabled(void)
+
+ static inline bool sev_guest(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return sev->active;
+ }
+
+ static inline int sev_get_asid(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return sev->asid;
+ }
+@@ -1090,7 +1118,7 @@ static void disable_nmi_singlestep(struct vcpu_svm *svm)
+ }
+
+ /* Note:
+- * This hash table is used to map VM_ID to a struct kvm_arch,
++ * This hash table is used to map VM_ID to a struct kvm_svm,
+ * when handling AMD IOMMU GALOG notification to schedule in
+ * a particular vCPU.
+ */
+@@ -1107,7 +1135,7 @@ static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
+ static int avic_ga_log_notifier(u32 ga_tag)
+ {
+ unsigned long flags;
+- struct kvm_arch *ka = NULL;
++ struct kvm_svm *kvm_svm;
+ struct kvm_vcpu *vcpu = NULL;
+ u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
+ u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+@@ -1115,13 +1143,10 @@ static int avic_ga_log_notifier(u32 ga_tag)
+ pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+- hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+- struct kvm *kvm = container_of(ka, struct kvm, arch);
+- struct kvm_arch *vm_data = &kvm->arch;
+-
+- if (vm_data->avic_vm_id != vm_id)
++ hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
++ if (kvm_svm->avic_vm_id != vm_id)
+ continue;
+- vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
++ vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
+ break;
+ }
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+@@ -1338,10 +1363,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ static void avic_init_vmcb(struct vcpu_svm *svm)
+ {
+ struct vmcb *vmcb = svm->vmcb;
+- struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
+ phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
+- phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
+- phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));
++ phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
++ phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
+
+ vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
+ vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
+@@ -1498,12 +1523,12 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
+ unsigned int index)
+ {
+ u64 *avic_physical_id_table;
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+
+ if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
+ return NULL;
+
+- avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
++ avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
+
+ return &avic_physical_id_table[index];
+ }
+@@ -1586,7 +1611,7 @@ static void __sev_asid_free(int asid)
+
+ static void sev_asid_free(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ __sev_asid_free(sev->asid);
+ }
+@@ -1626,7 +1651,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ unsigned long ulen, unsigned long *n,
+ int write)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ unsigned long npages, npinned, size;
+ unsigned long locked, lock_limit;
+ struct page **pages;
+@@ -1677,7 +1702,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+ unsigned long npages)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ release_pages(pages, npages, 0);
+ kvfree(pages);
+@@ -1717,17 +1742,18 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
+
+ static struct kvm *svm_vm_alloc(void)
+ {
+- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
++ struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
++ return &kvm_svm->kvm;
+ }
+
+ static void svm_vm_free(struct kvm *kvm)
+ {
+- kfree(kvm);
++ kfree(to_kvm_svm(kvm));
+ }
+
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct list_head *pos, *q;
+
+@@ -1756,18 +1782,18 @@ static void sev_vm_destroy(struct kvm *kvm)
+ static void avic_vm_destroy(struct kvm *kvm)
+ {
+ unsigned long flags;
+- struct kvm_arch *vm_data = &kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+
+ if (!avic)
+ return;
+
+- if (vm_data->avic_logical_id_table_page)
+- __free_page(vm_data->avic_logical_id_table_page);
+- if (vm_data->avic_physical_id_table_page)
+- __free_page(vm_data->avic_physical_id_table_page);
++ if (kvm_svm->avic_logical_id_table_page)
++ __free_page(kvm_svm->avic_logical_id_table_page);
++ if (kvm_svm->avic_physical_id_table_page)
++ __free_page(kvm_svm->avic_physical_id_table_page);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+- hash_del(&vm_data->hnode);
++ hash_del(&kvm_svm->hnode);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ }
+
+@@ -1781,10 +1807,10 @@ static int avic_vm_init(struct kvm *kvm)
+ {
+ unsigned long flags;
+ int err = -ENOMEM;
+- struct kvm_arch *vm_data = &kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
++ struct kvm_svm *k2;
+ struct page *p_page;
+ struct page *l_page;
+- struct kvm_arch *ka;
+ u32 vm_id;
+
+ if (!avic)
+@@ -1795,7 +1821,7 @@ static int avic_vm_init(struct kvm *kvm)
+ if (!p_page)
+ goto free_avic;
+
+- vm_data->avic_physical_id_table_page = p_page;
++ kvm_svm->avic_physical_id_table_page = p_page;
+ clear_page(page_address(p_page));
+
+ /* Allocating logical APIC ID table (4KB) */
+@@ -1803,7 +1829,7 @@ static int avic_vm_init(struct kvm *kvm)
+ if (!l_page)
+ goto free_avic;
+
+- vm_data->avic_logical_id_table_page = l_page;
++ kvm_svm->avic_logical_id_table_page = l_page;
+ clear_page(page_address(l_page));
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+@@ -1815,15 +1841,13 @@ static int avic_vm_init(struct kvm *kvm)
+ }
+ /* Is it still in use? Only possible if wrapped at least once */
+ if (next_vm_id_wrapped) {
+- hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+- struct kvm *k2 = container_of(ka, struct kvm, arch);
+- struct kvm_arch *vd2 = &k2->arch;
+- if (vd2->avic_vm_id == vm_id)
++ hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
++ if (k2->avic_vm_id == vm_id)
+ goto again;
+ }
+ }
+- vm_data->avic_vm_id = vm_id;
+- hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
++ kvm_svm->avic_vm_id = vm_id;
++ hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
+ return 0;
+@@ -4355,7 +4379,7 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+
+ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
+ {
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+ int index;
+ u32 *logical_apic_id_table;
+ int dlid = GET_APIC_LOGICAL_ID(ldr);
+@@ -4377,7 +4401,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
+ index = (cluster << 2) + apic;
+ }
+
+- logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
++ logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
+
+ return &logical_apic_id_table[index];
+ }
+@@ -4457,7 +4481,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
+ static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+ u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
+ u32 mod = (dfr >> 28) & 0xf;
+
+@@ -4466,11 +4490,11 @@ static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+ * If this changes, we need to flush the AVIC logical
+ * APID id table.
+ */
+- if (vm_data->ldr_mode == mod)
++ if (kvm_svm->ldr_mode == mod)
+ return 0;
+
+- clear_page(page_address(vm_data->avic_logical_id_table_page));
+- vm_data->ldr_mode = mod;
++ clear_page(page_address(kvm_svm->avic_logical_id_table_page));
++ kvm_svm->ldr_mode = mod;
+
+ if (svm->ldr_reg)
+ avic_handle_ldr_update(vcpu);
+@@ -5105,7 +5129,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ /* Try to enable guest_mode in IRTE */
+ pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
+ AVIC_HPA_MASK);
+- pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
++ pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
+ svm->vcpu.vcpu_id);
+ pi.is_guest_mode = true;
+ pi.vcpu_data = &vcpu_info;
+@@ -5979,7 +6003,7 @@ static int sev_asid_new(void)
+
+ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ int asid, ret;
+
+ ret = -EBUSY;
+@@ -6044,14 +6068,14 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+
+ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return __sev_issue_cmd(sev->fd, id, data, error);
+ }
+
+ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_start *start;
+ struct kvm_sev_launch_start params;
+ void *dh_blob, *session_blob;
+@@ -6149,7 +6173,7 @@ static int get_num_contig_pages(int idx, struct page **inpages,
+ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_launch_update_data params;
+ struct sev_data_launch_update_data *data;
+ struct page **inpages;
+@@ -6225,7 +6249,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ void __user *measure = (void __user *)(uintptr_t)argp->data;
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_measure *data;
+ struct kvm_sev_launch_measure params;
+ void __user *p = NULL;
+@@ -6293,7 +6317,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_finish *data;
+ int ret;
+
+@@ -6313,7 +6337,7 @@ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_guest_status params;
+ struct sev_data_guest_status *data;
+ int ret;
+@@ -6345,7 +6369,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+ unsigned long dst, int size,
+ int *error, bool enc)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_dbg *data;
+ int ret;
+
+@@ -6577,7 +6601,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+
+ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_secret *data;
+ struct kvm_sev_launch_secret params;
+ struct page **pages;
+@@ -6701,7 +6725,7 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ static int svm_register_enc_region(struct kvm *kvm,
+ struct kvm_enc_region *range)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct enc_region *region;
+ int ret = 0;
+
+@@ -6743,7 +6767,7 @@ static int svm_register_enc_region(struct kvm *kvm,
+ static struct enc_region *
+ find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct enc_region *i;
+
+--
+2.7.4
+