Revert "KVM: arm64: Avoid lock inversion when setting the VM register width"
This reverts commit 6c9d3f2a5e.
It breaks the Android kernel ABI at this point in time, so needs to be
dropped. If it is needed, it can come back in an ABI-safe way in the
future.
Bug: 161946584
Cc: Will Deacon <willdeacon@google.com>
Change-Id: Ifa88c3662cdd4f16e4da4f5d2606976b039055f7
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Parent commit: b8b87a4a37
This commit: 2bd2fb9c82
@@ -203,9 +203,6 @@ struct kvm_arch {
 	/* Mandated version of PSCI */
 	u32 psci_version;
 
-	/* Protects VM-scoped configuration data */
-	struct mutex config_lock;
-
 	/*
 	 * If we encounter a data abort without valid instruction syndrome
 	 * information, report this to user space. User space can (and
|
@@ -155,16 +155,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (type & ~KVM_VM_TYPE_MASK)
 		return -EINVAL;
 
-	mutex_init(&kvm->arch.config_lock);
-
-#ifdef CONFIG_LOCKDEP
-	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
-	mutex_lock(&kvm->lock);
-	mutex_lock(&kvm->arch.config_lock);
-	mutex_unlock(&kvm->arch.config_lock);
-	mutex_unlock(&kvm->lock);
-#endif
-
 	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
 		return ret;
@@ -435,14 +425,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);
 
-#ifdef CONFIG_LOCKDEP
-	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
-	mutex_lock(&vcpu->mutex);
-	mutex_lock(&vcpu->kvm->arch.config_lock);
-	mutex_unlock(&vcpu->kvm->arch.config_lock);
-	mutex_unlock(&vcpu->mutex);
-#endif
-
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
|
@@ -176,7 +176,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 
 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 
-	lockdep_assert_held(&kvm->arch.config_lock);
+	lockdep_assert_held(&kvm->lock);
 
 	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
 		/*
@@ -228,9 +228,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	int ret;
 	bool loaded;
 
-	mutex_lock(&vcpu->kvm->arch.config_lock);
+	mutex_lock(&vcpu->kvm->lock);
 	ret = kvm_set_vm_width(vcpu);
-	mutex_unlock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->kvm->lock);
 
 	if (ret)
 		return ret;
|
Loading…
Reference in New Issue
Block a user