Revert "KVM: arm64: Use config_lock to protect vgic state"

This reverts commit 569f33c3c2.

It breaks the Android kernel ABI at this point in time, so it needs to be
dropped.  If it is needed, it can come back in an ABI-safe way in the
future.

Bug: 161946584
Cc: Will Deacon <willdeacon@google.com>
Change-Id: Ib0885fa39fc6dc991048281513cc7c439680b303
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-06-10 08:15:26 +00:00
parent c937035a5d
commit 6c2658e477
8 changed files with 60 additions and 88 deletions
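
Every hunk below applies the same mechanical substitution: vgic paths that commit 569f33c3c2 had moved under the narrower kvm->arch.config_lock go back to serializing on the VM-wide kvm->lock, and the lockdep assertions and comments documenting config_lock are dropped. A minimal userspace sketch of the pattern being reverted (the struct layout and pthread mutexes are illustrative stand-ins, not the kernel's definitions):

#include <pthread.h>

/* Illustrative stand-ins only: the field names mirror the diff
 * (kvm->lock, kvm->arch.config_lock); everything else is hypothetical. */
struct kvm_arch {
	pthread_mutex_t config_lock;	/* the finer-grained lock this revert removes */
};

struct kvm {
	pthread_mutex_t lock;		/* the VM-wide lock the revert restores */
	struct kvm_arch arch;
	int nr_spis;
};

/* Shape of every converted call site after the revert: vgic state is
 * guarded by the big kvm->lock rather than kvm->arch.config_lock. */
static void set_nr_spis(struct kvm *kvm, int val)
{
	pthread_mutex_lock(&kvm->lock);		/* was: &kvm->arch.config_lock */
	kvm->nr_spis = val;
	pthread_mutex_unlock(&kvm->lock);	/* was: &kvm->arch.config_lock */
}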

arch/arm64/kvm/vgic/vgic-debug.c

@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	struct kvm *kvm = s->private;
 	struct vgic_state_iter *iter;
 
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	iter = kvm->arch.vgic.iter;
 	if (iter) {
 		iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	if (end_of_vgic(iter))
 		iter = NULL;
 out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 	return iter;
 }
 
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
 	if (IS_ERR(v))
 		return;
 
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	iter = kvm->arch.vgic.iter;
 	kfree(iter->lpi_array);
 	kfree(iter);
 	kvm->arch.vgic.iter = NULL;
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 }
 
 static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)

arch/arm64/kvm/vgic/vgic-init.c

@@ -74,6 +74,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	unsigned long i;
 	int ret;
 
+	if (irqchip_in_kernel(kvm))
+		return -EEXIST;
+
 	/*
 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
 	 * which had no chance yet to check the availability of the GICv2
@@ -84,20 +87,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	    !kvm_vgic_global_state.can_emulate_gicv2)
 		return -ENODEV;
 
-	/* Must be held to avoid race with vCPU creation */
-	lockdep_assert_held(&kvm->lock);
-
 	ret = -EBUSY;
 	if (!lock_all_vcpus(kvm))
 		return ret;
 
-	mutex_lock(&kvm->arch.config_lock);
-
-	if (irqchip_in_kernel(kvm)) {
-		ret = -EEXIST;
-		goto out_unlock;
-	}
-
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
@@ -125,7 +118,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
-	mutex_unlock(&kvm->arch.config_lock);
 	unlock_all_vcpus(kvm);
 	return ret;
 }
@@ -235,9 +227,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->arch.config_lock);
+		mutex_lock(&vcpu->kvm->lock);
 		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		mutex_unlock(&vcpu->kvm->lock);
 	}
 
 	return ret;
 }
@@ -258,6 +250,7 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
  * The function is generally called when nr_spis has been explicitly set
  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
  * vgic_initialized() returns true when this function has succeeded.
+ * Must be called with kvm->lock held!
  */
 int vgic_init(struct kvm *kvm)
 {
@@ -266,8 +259,6 @@ int vgic_init(struct kvm *kvm)
 	int ret = 0, i;
 	unsigned long idx;
 
-	lockdep_assert_held(&kvm->arch.config_lock);
-
 	if (vgic_initialized(kvm))
 		return 0;
 
@@ -382,13 +373,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 }
 
+/* To be called with kvm->lock held */
 static void __kvm_vgic_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
-	lockdep_assert_held(&kvm->arch.config_lock);
-
 	vgic_debug_destroy(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -399,9 +389,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
 void kvm_vgic_destroy(struct kvm *kvm)
 {
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	__kvm_vgic_destroy(kvm);
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 }
 
 /**
@@ -424,9 +414,9 @@ int vgic_lazy_init(struct kvm *kvm)
 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
 			return -EBUSY;
 
-		mutex_lock(&kvm->arch.config_lock);
+		mutex_lock(&kvm->lock);
 		ret = vgic_init(kvm);
-		mutex_unlock(&kvm->arch.config_lock);
+		mutex_unlock(&kvm->lock);
 	}
 
 	return ret;
@@ -451,7 +441,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (likely(vgic_ready(kvm)))
 		return 0;
 
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	if (vgic_ready(kvm))
 		goto out;
 
@@ -469,7 +459,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 	return ret;
 }
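
The kvm_vgic_create() hunks above restore the original ordering: reject an existing irqchip before taking any locks, then try to grab every vCPU mutex up front and bail out with -EBUSY if any vCPU is busy. A toy illustration of that all-or-nothing pattern (this sketch assumes a fixed vCPU array initialized elsewhere with pthread_mutex_init(); it is not the kernel's lock_all_vcpus() implementation):

#include <pthread.h>

#define NR_VCPUS 4

static pthread_mutex_t vcpu_mutex[NR_VCPUS];	/* stand-in for vcpu->mutex */

/* All-or-nothing acquisition: if any vCPU mutex is contended, release
 * everything taken so far and fail, so the caller can return -EBUSY
 * instead of sleeping while holding other locks. */
static int lock_all_vcpus_sketch(void)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++) {
		if (pthread_mutex_trylock(&vcpu_mutex[i]) != 0) {
			while (--i >= 0)
				pthread_mutex_unlock(&vcpu_mutex[i]);
			return 0;	/* failure: caller maps this to -EBUSY */
		}
	}

	return 1;	/* success: all vCPUs locked */
}

static void unlock_all_vcpus_sketch(void)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		pthread_mutex_unlock(&vcpu_mutex[i]);
}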

arch/arm64/kvm/vgic/vgic-its.c

@@ -2045,13 +2045,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		mutex_unlock(&dev->kvm->lock);
-		return -EBUSY;
-	}
-
-	mutex_lock(&dev->kvm->arch.config_lock);
-
 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
 		ret = -ENXIO;
 		goto out;
@@ -2065,6 +2058,11 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 		goto out;
 	}
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	addr = its->vgic_its_base + offset;
 
 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2078,9 +2076,8 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 	} else {
 		*reg = region->its_read(dev->kvm, its, addr, len);
 	}
-out:
-	mutex_unlock(&dev->kvm->arch.config_lock);
 	unlock_all_vcpus(dev->kvm);
+out:
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
@@ -2760,8 +2757,6 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		return -EBUSY;
 	}
 
-	mutex_lock(&kvm->arch.config_lock);
-
 	switch (attr) {
 	case KVM_DEV_ARM_ITS_CTRL_RESET:
 		vgic_its_reset(kvm, its);
@@ -2774,7 +2769,6 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		break;
 	}
 
-	mutex_unlock(&kvm->arch.config_lock);
 	unlock_all_vcpus(kvm);
 	mutex_unlock(&its->its_lock);
 	mutex_unlock(&kvm->lock);

arch/arm64/kvm/vgic/vgic-kvm-device.c

@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	int r;
 
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 		r = -ENODEV;
 	}
 
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 
 	return r;
 }
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 		if (get_user(addr, uaddr))
 			return -EFAULT;
 
-	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&kvm->lock);
 	switch (attr->attr) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	}
 
out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
 
 	if (!r && !write)
 		r = put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 		    (val & 31))
 			return -EINVAL;
 
-		mutex_lock(&dev->kvm->arch.config_lock);
+		mutex_lock(&dev->kvm->lock);
 
 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
 			ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 			dev->kvm->arch.vgic.nr_spis =
 				val - VGIC_NR_PRIVATE_IRQS;
 
-		mutex_unlock(&dev->kvm->arch.config_lock);
+		mutex_unlock(&dev->kvm->lock);
 
 		return ret;
 	}
 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			mutex_lock(&dev->kvm->arch.config_lock);
+			mutex_lock(&dev->kvm->lock);
 			r = vgic_init(dev->kvm);
-			mutex_unlock(&dev->kvm->arch.config_lock);
+			mutex_unlock(&dev->kvm->lock);
 			return r;
 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
 			/*
@@ -260,10 +260,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 				mutex_unlock(&dev->kvm->lock);
 				return -EBUSY;
 			}
-
-			mutex_lock(&dev->kvm->arch.config_lock);
 			r = vgic_v3_save_pending_tables(dev->kvm);
-			mutex_unlock(&dev->kvm->arch.config_lock);
 			unlock_all_vcpus(dev->kvm);
 			mutex_unlock(&dev->kvm->lock);
 			return r;
@@ -414,17 +411,15 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		mutex_unlock(&dev->kvm->lock);
-		return -EBUSY;
-	}
-
-	mutex_lock(&dev->kvm->arch.config_lock);
-
 	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -437,9 +432,8 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-out:
-	mutex_unlock(&dev->kvm->arch.config_lock);
 	unlock_all_vcpus(dev->kvm);
+out:
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && !is_write)
@@ -575,14 +569,12 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		mutex_unlock(&dev->kvm->lock);
-		return -EBUSY;
+	if (unlikely(!vgic_initialized(dev->kvm))) {
+		ret = -EBUSY;
+		goto out;
 	}
 
-	mutex_lock(&dev->kvm->arch.config_lock);
-
-	if (unlikely(!vgic_initialized(dev->kvm))) {
+	if (!lock_all_vcpus(dev->kvm)) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -617,9 +609,8 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-out:
-	mutex_unlock(&dev->kvm->arch.config_lock);
 	unlock_all_vcpus(dev->kvm);
+out:
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && uaccess && !is_write) {

arch/arm64/kvm/vgic/vgic-mmio-v3.c

@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 	case GICD_CTLR: {
 		bool was_enabled, is_hwsgi;
 
-		mutex_lock(&vcpu->kvm->arch.config_lock);
+		mutex_lock(&vcpu->kvm->lock);
 
 		was_enabled = dist->enabled;
 		is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 		else if (!was_enabled && dist->enabled)
 			vgic_kick_vcpus(vcpu->kvm);
 
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		mutex_unlock(&vcpu->kvm->lock);
 		break;
 	}
 	case GICD_TYPER:

arch/arm64/kvm/vgic/vgic-mmio.c

@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 val;
 
-	mutex_lock(&vcpu->kvm->arch.config_lock);
+	mutex_lock(&vcpu->kvm->lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	val = __vgic_mmio_read_active(vcpu, addr, len);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->kvm->lock);
 
 	return val;
 }
@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->arch.config_lock);
+	mutex_lock(&vcpu->kvm->lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->kvm->lock);
 }
 
 int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->arch.config_lock);
+	mutex_lock(&vcpu->kvm->lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->kvm->lock);
 }
 
 int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,

arch/arm64/kvm/vgic/vgic-v4.c

@@ -232,8 +232,9 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
  * @kvm: Pointer to the VM being initialized
  *
  * We may be called each time a vITS is created, or when the
- * vgic is initialized. In both cases, the number of vcpus
- * should now be fixed.
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
  */
 int vgic_v4_init(struct kvm *kvm)
 {
@@ -242,8 +243,6 @@ int vgic_v4_init(struct kvm *kvm)
 	int nr_vcpus, ret;
 	unsigned long i;
 
-	lockdep_assert_held(&kvm->arch.config_lock);
-
 	if (!kvm_vgic_global_state.has_gicv4)
 		return 0; /* Nothing to see here... move along. */
 
@@ -310,14 +309,14 @@ int vgic_v4_init(struct kvm *kvm)
 
 /**
  * vgic_v4_teardown - Free the GICv4 data structures
  * @kvm: Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
  */
 void vgic_v4_teardown(struct kvm *kvm)
 {
 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
 	int i;
 
-	lockdep_assert_held(&kvm->arch.config_lock);
-
 	if (!its_vm->vpes)
 		return;

arch/arm64/kvm/vgic/vgic.c

@@ -24,13 +24,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 /*
  * Locking order is always:
  * kvm->lock (mutex)
- *   vcpu->mutex (mutex)
- *     kvm->arch.config_lock (mutex)
- *       its->cmd_lock (mutex)
- *         its->its_lock (mutex)
- *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
- *             kvm->lpi_list_lock		must be taken with IRQs disabled
- *               vgic_irq->irq_lock		must be taken with IRQs disabled
+ *   its->cmd_lock (mutex)
+ *     its->its_lock (mutex)
+ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *         kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
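
The hunk above restores the pre-config_lock hierarchy: kvm->lock is again the outermost mutex, with the ITS mutexes nested directly beneath it. A short sketch of the discipline that ordering implies (the helper and pthread mutexes are hypothetical stand-ins; the IRQ-disabling rule for the spinlocks at the bottom of the list has no userspace analogue here):

#include <pthread.h>

/* Stand-ins for the three mutexes at the top of the restored order. */
static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;	/* kvm->lock */
static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;	/* its->cmd_lock */
static pthread_mutex_t its_lock = PTHREAD_MUTEX_INITIALIZER;	/* its->its_lock */

/* Hypothetical helper: always acquire outermost-first and release in
 * reverse. Taking cmd_lock or its_lock before kvm->lock would invert the
 * documented order, which is exactly what lockdep would flag. */
static void with_its_locks(void (*op)(void))
{
	pthread_mutex_lock(&kvm_lock);		/* outermost, always first */
	pthread_mutex_lock(&cmd_lock);
	pthread_mutex_lock(&its_lock);

	op();					/* work under all three locks */

	pthread_mutex_unlock(&its_lock);	/* innermost released first */
	pthread_mutex_unlock(&cmd_lock);
	pthread_mutex_unlock(&kvm_lock);
}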