author     Paul Mackerras <paulus@ozlabs.org>  2019-05-23 16:36:32 +1000
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2019-09-16 12:21:42 -0400
commit     c91a76e30065e28ee834bc2a53a94a785779f827 (patch)
tree       f7fb6ab2fd740b0e0f14a37e0b5cb09a514719e3
parent     6de06a0e679314f188dddefb073131019c077b93 (diff)
KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
commit 5a3f49364c3ffa1107bd88f8292406e98c5d206c upstream.

Currently the HV KVM code takes the kvm->lock around calls to
kvm_for_each_vcpu() and kvm_get_vcpu_by_id() (which can call
kvm_for_each_vcpu() internally).  However, that leads to a lock
order inversion problem, because these are called in contexts where
the vcpu mutex is held, but the vcpu mutexes nest within kvm->lock
according to Documentation/virtual/kvm/locking.txt.  Hence there
is a possibility of deadlock.

To fix this, we simply don't take the kvm->lock mutex around these
calls.  This is safe because the implementations of
kvm_for_each_vcpu() and kvm_get_vcpu_by_id() have been designed to
be able to be called locklessly.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
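To make the lock-order inversion concrete, below is a minimal user-space sketch of the ABBA pattern the commit message describes, using POSIX threads rather than the kernel's mutex API.  The names kvm_lock, vcpu_mutex, documented_order and inverted_order are illustrative stand-ins chosen for this sketch, not the kernel's objects or functions; the scenario is an analogy to code that runs with a vcpu mutex held and then takes kvm->lock (as kvmppc_find_vcpu() did before this patch).

/*
 * Illustrative user-space sketch of the ABBA lock-order inversion described
 * in the commit message.  kvm_lock stands in for kvm->lock and vcpu_mutex
 * for a vcpu->mutex; these are pthread analogies, not the kernel objects.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t kvm_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Documented order (Documentation/virtual/kvm/locking.txt): kvm->lock is
 * taken first, then a vcpu mutex. */
static void *documented_order(void *arg)
{
	pthread_mutex_lock(&kvm_lock);
	usleep(100000);                     /* let the other thread run */
	pthread_mutex_lock(&vcpu_mutex);    /* blocks while the other thread holds it */
	puts("documented order: kvm_lock -> vcpu_mutex");
	pthread_mutex_unlock(&vcpu_mutex);
	pthread_mutex_unlock(&kvm_lock);
	return NULL;
}

/* The pattern this patch removes: code already running with the vcpu mutex
 * held (a vcpu ioctl path) then taking kvm->lock. */
static void *inverted_order(void *arg)
{
	int i;

	pthread_mutex_lock(&vcpu_mutex);
	usleep(100000);
	/* trylock is used only so this demo terminates instead of hanging. */
	for (i = 0; i < 10; i++) {
		if (pthread_mutex_trylock(&kvm_lock) == 0) {
			pthread_mutex_unlock(&kvm_lock);
			pthread_mutex_unlock(&vcpu_mutex);
			return NULL;
		}
		usleep(100000);
	}
	puts("inverted order: kvm_lock is held by a thread that is itself");
	puts("waiting for vcpu_mutex -- a real mutex_lock() would deadlock here");
	pthread_mutex_unlock(&vcpu_mutex);  /* let the other thread finish */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, documented_order, NULL);
	pthread_create(&b, NULL, inverted_order, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

In the kernel the blocked path cannot bail out the way the trylock loop above does, which is why the fix is to drop kvm->lock entirely on these paths rather than reorder the locks.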
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  9
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2ebd5132a29f..85f63a0c0fb1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -427,12 +427,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
- struct kvm_vcpu *ret;
-
- mutex_lock(&kvm->lock);
- ret = kvm_get_vcpu_by_id(kvm, id);
- mutex_unlock(&kvm->lock);
- return ret;
+ return kvm_get_vcpu_by_id(kvm, id);
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1310,7 +1305,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
- mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
@@ -1350,7 +1344,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
- mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,