Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r--  arch/riscv/kvm/vcpu.c  297
1 file changed, 235 insertions(+), 62 deletions(-)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 71ebbc4821f0..8bd9f2a8a0b9 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -21,6 +21,7 @@
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
+#include <asm/sbi.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
@@ -57,11 +58,14 @@ static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+ KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVPBMT),
+ KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZICBOM),
+ KVM_ISA_EXT_ARR(ZICBOZ),
};
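
The new SSAIA, ZBB, and ZICBOZ entries above use the KVM_ISA_EXT_ARR() helper defined earlier in this file, which presumably expands along the following lines (shown here only for illustration, not part of this hunk):

#define KVM_ISA_EXT_ARR(ext)	\
	[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

Each entry therefore just maps the KVM userspace extension id to the host ISA extension id later checked via riscv_isa_extension_available().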
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -95,9 +99,11 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
+ case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_RISCV_ISA_EXT_ZBB:
return false;
default:
break;
@@ -134,8 +140,12 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_timer_reset(vcpu);
- WRITE_ONCE(vcpu->arch.irqs_pending, 0);
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+ kvm_riscv_vcpu_aia_reset(vcpu);
+
+ bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
+
+ kvm_riscv_vcpu_pmu_reset(vcpu);
vcpu->arch.hfence_head = 0;
vcpu->arch.hfence_tail = 0;
@@ -154,6 +164,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
+ int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
unsigned long host_isa, i;
@@ -171,6 +182,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
set_bit(host_isa, vcpu->arch.isa);
}
+ /* Setup vendor, arch, and implementation details */
+ vcpu->arch.mvendorid = sbi_get_mvendorid();
+ vcpu->arch.marchid = sbi_get_marchid();
+ vcpu->arch.mimpid = sbi_get_mimpid();
+
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
@@ -188,6 +204,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
+ /* setup performance monitoring */
+ kvm_riscv_vcpu_pmu_init(vcpu);
+
+ /* Setup VCPU AIA */
+ rc = kvm_riscv_vcpu_aia_init(vcpu);
+ if (rc)
+ return rc;
+
/* Reset VCPU */
kvm_riscv_reset_vcpu(vcpu);
@@ -207,9 +231,14 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ /* Cleanup VCPU AIA context */
+ kvm_riscv_vcpu_aia_deinit(vcpu);
+
/* Cleanup VCPU timer */
kvm_riscv_vcpu_timer_deinit(vcpu);
+ kvm_riscv_vcpu_pmu_deinit(vcpu);
+
/* Free unused pages pre-allocated for G-stage page table mappings */
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
@@ -270,6 +299,20 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
return -EINVAL;
reg_val = riscv_cbom_block_size;
break;
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ return -EINVAL;
+ reg_val = riscv_cboz_block_size;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ reg_val = vcpu->arch.mvendorid;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ reg_val = vcpu->arch.marchid;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ reg_val = vcpu->arch.mimpid;
+ break;
default:
return -EINVAL;
}
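
The new mvendorid, marchid, and mimpid config registers are ordinary ONE_REG accesses from userspace. A minimal sketch of how a VMM might read the virtualized mvendorid, assuming an rv64 host and an already created vCPU fd (the helper name is hypothetical; marchid and mimpid are read the same way, and KVM_SET_ONE_REG on the same ids works symmetrically but, per the set path further below, only before the vCPU has run):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

static int read_mvendorid(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			KVM_REG_RISCV_CONFIG |
			KVM_REG_RISCV_CONFIG_REG(mvendorid),
		.addr = (uint64_t)(unsigned long)val,
	};

	/* 0 on success, -1 with errno set on failure */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}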
@@ -296,12 +339,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
- /* This ONE REG interface is only defined for single letter extensions */
- if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
- return -EINVAL;
-
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
+ /*
+ * This ONE REG interface is only defined for
+ * single letter extensions.
+ */
+ if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+ return -EINVAL;
+
if (!vcpu->arch.ran_atleast_once) {
/* Ignore the enable/disable request for certain extensions */
for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
@@ -329,6 +375,26 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
return -EOPNOTSUPP;
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ return -EOPNOTSUPP;
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.mvendorid = reg_val;
+ else
+ return -EBUSY;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.marchid = reg_val;
+ else
+ return -EBUSY;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.mimpid = reg_val;
+ else
+ return -EBUSY;
+ break;
default:
return -EINVAL;
}
@@ -404,27 +470,76 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -EINVAL;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ kvm_riscv_vcpu_flush_interrupts(vcpu);
+ *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
+ *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
+ } else
+ *out_val = ((unsigned long *)csr)[reg_num];
+
+ return 0;
+}
+
+static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -EINVAL;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ reg_val &= VSIP_VALID_MASK;
+ reg_val <<= VSIP_TO_HVIP_SHIFT;
+ }
+
+ ((unsigned long *)csr)[reg_num] = reg_val;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
+ WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
+
+ return 0;
+}
+
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
- unsigned long reg_val;
+ unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- kvm_riscv_vcpu_flush_interrupts(vcpu);
- reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
- } else
- reg_val = ((unsigned long *)csr)[reg_num];
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return rc;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -435,31 +550,35 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
- unsigned long reg_val;
+ unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- reg_val &= VSIP_VALID_MASK;
- reg_val <<= VSIP_TO_HVIP_SHIFT;
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
}
-
- ((unsigned long *)csr)[reg_num] = reg_val;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+ if (rc)
+ return rc;
return 0;
}
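
With the subtype split above, a CSR register id now carries a subtype field in addition to the register index. Continuing the userspace sketch from the config-register example, a general CSR such as sip would be addressed roughly as follows (illustrative only; AIA CSRs use KVM_REG_RISCV_CSR_AIA instead):

	uint64_t sip_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL |
			  KVM_REG_RISCV_CSR_REG(sip);

Because the general subtype encodes as zero in the subtype bits, CSR ids built by existing userspace remain valid after this change.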
@@ -541,22 +660,28 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+ case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+ case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+ case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+ case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+ case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+ case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
+ default:
+ break;
+ }
return -EINVAL;
}
@@ -564,22 +689,28 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+ case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+ case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+ case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+ case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+ case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+ case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
+ default:
+ break;
+ }
return -EINVAL;
}
@@ -677,13 +808,16 @@ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
unsigned long mask, val;
- if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
- mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
- val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
+ mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
+ val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
csr->hvip &= ~mask;
csr->hvip |= val;
}
+
+ /* Flush AIA high interrupts */
+ kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
@@ -700,29 +834,38 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
if (hvip & (1UL << IRQ_VS_SOFT)) {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- set_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ set_bit(IRQ_VS_SOFT, v->irqs_pending);
} else {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ clear_bit(IRQ_VS_SOFT, v->irqs_pending);
}
}
+ /* Sync-up AIA high interrupts */
+ kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
+
/* Sync-up timer CSRs */
kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- set_bit(irq, &vcpu->arch.irqs_pending);
+ set_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
kvm_vcpu_kick(vcpu);
@@ -731,24 +874,37 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- clear_bit(irq, &vcpu->arch.irqs_pending);
+ clear_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
return 0;
}
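
A minimal in-kernel usage sketch of these two helpers, with a hypothetical caller such as a device emulation path: assert and later deassert the VS-mode external interrupt for a vCPU. With the bitmap-based irqs_pending above, the same calls now also accept IRQ numbers at or above IRQ_LOCAL_MAX, which is how AIA interrupts beyond the base local interrupts can be tracked.

static void toggle_vs_ext_irq(struct kvm_vcpu *vcpu, bool level)
{
	if (level)
		/* Marks the IRQ pending and kicks the vCPU out of guest mode */
		kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
	else
		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
}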
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
- << VSIP_TO_HVIP_SHIFT) & mask;
+ unsigned long ie;
+
+ ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
+ << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
+ ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
+ (unsigned long)mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
+ return true;
- return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
+ /* Check AIA high interrupts */
+ return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
@@ -814,6 +970,9 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
if (riscv_isa_extension_available(isa, ZICBOM))
henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+ if (riscv_isa_extension_available(isa, ZICBOZ))
+ henvcfg |= ENVCFG_CBZE;
+
csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -844,6 +1003,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
vcpu->arch.isa);
+ kvm_riscv_vcpu_aia_load(vcpu, cpu);
+
vcpu->cpu = cpu;
}
@@ -853,6 +1014,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
+ kvm_riscv_vcpu_aia_put(vcpu);
+
kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
@@ -920,6 +1083,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
csr_write(CSR_HVIP, csr->hvip);
+ kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
/*
@@ -984,13 +1148,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
while (ret > 0) {
/* Check conditions before entering the guest */
ret = xfer_to_guest_mode_handle_work(vcpu);
- if (!ret)
- ret = 1;
+ if (ret)
+ continue;
+ ret = 1;
kvm_riscv_gstage_vmid_update(vcpu);
kvm_riscv_check_vcpu_requests(vcpu);
+ preempt_disable();
+
+ /* Update AIA HW state before entering guest */
+ ret = kvm_riscv_vcpu_aia_update(vcpu);
+ if (ret <= 0) {
+ preempt_enable();
+ continue;
+ }
+
local_irq_disable();
/*
@@ -1019,6 +1193,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
xfer_to_guest_mode_work_pending()) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
+ preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@@ -1052,8 +1227,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
- preempt_disable();
-
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as