path: root/meta-e3000/recipes-kernel/linux/linux-yocto-4.14.71/0071-KVM-SVM-VMRUN-should-use-associated-ASID-when-SEV-is.patch
From 6d4852ea9d190fbb0607d7b067662b30dac7983e Mon Sep 17 00:00:00 2001
From: Sudheesh Mavila <sudheesh.mavila@amd.com>
Date: Tue, 14 Aug 2018 22:13:30 +0530
Subject: [PATCH 71/95] KVM: SVM: VMRUN should use associated ASID when SEV is
 enabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

 commit 70cd94e60c733e3afc18b0e6aab789c13b5571da

    SEV hardware uses ASIDs to associate a memory encryption key with a
    guest VM. During guest creation, an SEV VM uses the SEV_CMD_ACTIVATE
    command to bind a particular ASID to the guest. Let's make sure that
    the VMCB is programmed with the bound ASID before a VMRUN.

    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Ingo Molnar <mingo@redhat.com>
    Cc: "H. Peter Anvin" <hpa@zytor.com>
    Cc: Paolo Bonzini <pbonzini@redhat.com>
    Cc: "Radim Krčmář" <rkrcmar@redhat.com>
    Cc: Joerg Roedel <joro@8bytes.org>
    Cc: Borislav Petkov <bp@suse.de>
    Cc: Tom Lendacky <thomas.lendacky@amd.com>
    Cc: x86@kernel.org
    Cc: kvm@vger.kernel.org
    Cc: linux-kernel@vger.kernel.org
    Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
    Reviewed-by: Borislav Petkov <bp@suse.de>

Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
---
 arch/x86/kvm/svm.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 2 deletions(-)
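
For illustration, here is a minimal standalone sketch (plain C, placed below
the diffstat so it is not part of the commit message or the diff) of the
flush decision that pre_sev_run() implements. The field names (asid,
last_cpu, sev_vmcbs, tlb_ctl) follow the patch, but the simplified types,
the fixed two-"CPU" table, and the main() harness are hypothetical
scaffolding; the unconditional vmcb->control.asid assignment is elided to
keep the focus on the flush logic.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS			2	/* hypothetical two-CPU host */
#define MAX_SEV_ASID		16	/* stand-in for the CPUID-reported maximum */
#define TLB_CONTROL_FLUSH_ASID	3	/* assumed value, per the AMD APM */

struct vmcb { int tlb_ctl; };

struct vcpu {
	struct vmcb *vmcb;
	int asid;	/* ASID bound to the guest via SEV_CMD_ACTIVATE */
	int last_cpu;	/* host CPU used for the previous VMRUN */
};

/* Per-CPU table: index = SEV ASID, value = VMCB last run with that ASID. */
static struct vmcb *sev_vmcbs[NR_CPUS][MAX_SEV_ASID + 1];

/* Returns true when the guest TLB must be flushed before this VMRUN. */
static bool pre_sev_run(struct vcpu *v, int cpu)
{
	/* Same VMCB on the same host CPU: cached translations are valid. */
	if (sev_vmcbs[cpu][v->asid] == v->vmcb && v->last_cpu == cpu)
		return false;

	v->last_cpu = cpu;
	sev_vmcbs[cpu][v->asid] = v->vmcb;
	v->vmcb->tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	return true;
}

int main(void)
{
	struct vmcb a = { 0 }, b = { 0 };
	struct vcpu v1 = { &a, 1, -1 };	/* guest using ASID 1 */
	struct vcpu v2 = { &b, 1, -1 };	/* same ASID, different VMCB */

	printf("first VMRUN on cpu0:   flush=%d\n", pre_sev_run(&v1, 0)); /* 1 */
	printf("repeat VMRUN on cpu0:  flush=%d\n", pre_sev_run(&v1, 0)); /* 0 */
	printf("other VMCB, same ASID: flush=%d\n", pre_sev_run(&v2, 0)); /* 1 */
	printf("v1 migrated to cpu1:   flush=%d\n", pre_sev_run(&v1, 1)); /* 1 */
	return 0;
}

A VMRUN only skips the ASID flush when both the VMCB and the host CPU match
the previous run, which is exactly the pair of conditions spelled out in the
comment the patch adds.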

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c41635b..f4fc0b2 100755
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -225,6 +225,9 @@ struct vcpu_svm {
 	 */
 	struct list_head ir_list;
 	spinlock_t ir_list_lock;
+
+	/* which host CPU was used for running this vcpu */
+	unsigned int last_cpu;
 };
 
 /*
@@ -355,6 +358,13 @@ static inline bool sev_guest(struct kvm *kvm)
 	return sev->active;
 }
 
+static inline int sev_get_asid(struct kvm *kvm)
+{
+	struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+	return sev->asid;
+}
+
 static inline void mark_all_dirty(struct vmcb *vmcb)
 {
 	vmcb->control.clean = 0;
@@ -566,6 +576,9 @@ struct svm_cpu_data {
 
 	struct page *save_area;
 	struct vmcb *current_vmcb;
+
+	/* index = sev_asid, value = vmcb pointer */
+	struct vmcb **sev_vmcbs;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -879,6 +892,7 @@ static void svm_cpu_uninit(int cpu)
 		return;
 
 	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+	kfree(sd->sev_vmcbs);
 	__free_page(sd->save_area);
 	kfree(sd);
 }
@@ -892,11 +906,18 @@ static int svm_cpu_init(int cpu)
 	if (!sd)
 		return -ENOMEM;
 	sd->cpu = cpu;
-	sd->save_area = alloc_page(GFP_KERNEL);
 	r = -ENOMEM;
+	sd->save_area = alloc_page(GFP_KERNEL);
 	if (!sd->save_area)
 		goto err_1;
 
+	if (svm_sev_enabled()) {
+		r = -ENOMEM;
+		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
+		if (!sd->sev_vmcbs)
+			goto err_1;
+	}
+
 	per_cpu(svm_data, cpu) = sd;
 
 	return 0;
@@ -1535,10 +1556,16 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 
 static void __sev_asid_free(int asid)
 {
-	int pos;
+	struct svm_cpu_data *sd;
+	int cpu, pos;
 
 	pos = asid - 1;
 	clear_bit(pos, sev_asid_bitmap);
+
+	for_each_possible_cpu(cpu) {
+		sd = per_cpu(svm_data, cpu);
+		sd->sev_vmcbs[pos] = NULL;
+	}
 }
 
 static void sev_asid_free(struct kvm *kvm)
@@ -4603,12 +4630,39 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 	load_TR_desc();
 }
 
+static void pre_sev_run(struct vcpu_svm *svm, int cpu)
+{
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+	int asid = sev_get_asid(svm->vcpu.kvm);
+
+	/* Assign the asid allocated with this SEV guest */
+	svm->vmcb->control.asid = asid;
+
+	/*
+	 * Flush guest TLB:
+	 *
+	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
+	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
+	 */
+	if (sd->sev_vmcbs[asid] == svm->vmcb &&
+	    svm->last_cpu == cpu)
+		return;
+
+	svm->last_cpu = cpu;
+	sd->sev_vmcbs[asid] = svm->vmcb;
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+	mark_dirty(svm->vmcb, VMCB_ASID);
+}
+
 static void pre_svm_run(struct vcpu_svm *svm)
 {
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
+	if (sev_guest(svm->vcpu.kvm))
+		return pre_sev_run(svm, cpu);
+
 	/* FIXME: handle wraparound of asid_generation */
 	if (svm->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
-- 
2.7.4