Revert "KVM: arm64: Use config_lock to protect vgic state"

This reverts commit 569f33c3c2.

It breaks the Android kernel ABI at this point in time, so needs to be
dropped.  If it is needed, it can come back in an ABI-safe way in the
future.

Bug: 161946584
Cc: Will Deacon <willdeacon@google.com>
Change-Id: Ib0885fa39fc6dc991048281513cc7c439680b303
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2023-06-10 08:15:26 +00:00
parent c937035a5d
commit 6c2658e477
8 changed files with 60 additions and 88 deletions

View File

@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
struct kvm *kvm = s->private; struct kvm *kvm = s->private;
struct vgic_state_iter *iter; struct vgic_state_iter *iter;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
iter = kvm->arch.vgic.iter; iter = kvm->arch.vgic.iter;
if (iter) { if (iter) {
iter = ERR_PTR(-EBUSY); iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
if (end_of_vgic(iter)) if (end_of_vgic(iter))
iter = NULL; iter = NULL;
out: out:
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
return iter; return iter;
} }
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
if (IS_ERR(v)) if (IS_ERR(v))
return; return;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
iter = kvm->arch.vgic.iter; iter = kvm->arch.vgic.iter;
kfree(iter->lpi_array); kfree(iter->lpi_array);
kfree(iter); kfree(iter);
kvm->arch.vgic.iter = NULL; kvm->arch.vgic.iter = NULL;
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
} }
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist) static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)

View File

@@ -74,6 +74,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
unsigned long i; unsigned long i;
int ret; int ret;
if (irqchip_in_kernel(kvm))
return -EEXIST;
/* /*
* This function is also called by the KVM_CREATE_IRQCHIP handler, * This function is also called by the KVM_CREATE_IRQCHIP handler,
* which had no chance yet to check the availability of the GICv2 * which had no chance yet to check the availability of the GICv2
@@ -84,20 +87,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2) !kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV; return -ENODEV;
/* Must be held to avoid race with vCPU creation */
lockdep_assert_held(&kvm->lock);
ret = -EBUSY; ret = -EBUSY;
if (!lock_all_vcpus(kvm)) if (!lock_all_vcpus(kvm))
return ret; return ret;
mutex_lock(&kvm->arch.config_lock);
if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out_unlock;
}
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_has_run_once(vcpu)) if (vcpu_has_run_once(vcpu))
goto out_unlock; goto out_unlock;
@@ -125,7 +118,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions); INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
out_unlock: out_unlock:
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm); unlock_all_vcpus(kvm);
return ret; return ret;
} }
@@ -235,9 +227,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* KVM io device for the redistributor that belongs to this VCPU. * KVM io device for the redistributor that belongs to this VCPU.
*/ */
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
mutex_lock(&vcpu->kvm->arch.config_lock); mutex_lock(&vcpu->kvm->lock);
ret = vgic_register_redist_iodev(vcpu); ret = vgic_register_redist_iodev(vcpu);
mutex_unlock(&vcpu->kvm->arch.config_lock); mutex_unlock(&vcpu->kvm->lock);
} }
return ret; return ret;
} }
@@ -258,6 +250,7 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
* The function is generally called when nr_spis has been explicitly set * The function is generally called when nr_spis has been explicitly set
* by the guest through the KVM DEVICE API. If not nr_spis is set to 256. * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
* vgic_initialized() returns true when this function has succeeded. * vgic_initialized() returns true when this function has succeeded.
* Must be called with kvm->lock held!
*/ */
int vgic_init(struct kvm *kvm) int vgic_init(struct kvm *kvm)
{ {
@@ -266,8 +259,6 @@ int vgic_init(struct kvm *kvm)
int ret = 0, i; int ret = 0, i;
unsigned long idx; unsigned long idx;
lockdep_assert_held(&kvm->arch.config_lock);
if (vgic_initialized(kvm)) if (vgic_initialized(kvm))
return 0; return 0;
@@ -382,13 +373,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
} }
/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm) static void __kvm_vgic_destroy(struct kvm *kvm)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
unsigned long i; unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
vgic_debug_destroy(kvm); vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) kvm_for_each_vcpu(i, vcpu, kvm)
@@ -399,9 +389,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_destroy(struct kvm *kvm) void kvm_vgic_destroy(struct kvm *kvm)
{ {
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
__kvm_vgic_destroy(kvm); __kvm_vgic_destroy(kvm);
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
} }
/** /**
@@ -424,9 +414,9 @@ int vgic_lazy_init(struct kvm *kvm)
if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
return -EBUSY; return -EBUSY;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
ret = vgic_init(kvm); ret = vgic_init(kvm);
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
} }
return ret; return ret;
@@ -451,7 +441,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (likely(vgic_ready(kvm))) if (likely(vgic_ready(kvm)))
return 0; return 0;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
if (vgic_ready(kvm)) if (vgic_ready(kvm))
goto out; goto out;
@@ -469,7 +459,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
dist->ready = true; dist->ready = true;
out: out:
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
return ret; return ret;
} }

View File

@@ -2045,13 +2045,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock); mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
ret = -ENXIO; ret = -ENXIO;
goto out; goto out;
@@ -2065,6 +2058,11 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
goto out; goto out;
} }
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
addr = its->vgic_its_base + offset; addr = its->vgic_its_base + offset;
len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4; len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2078,9 +2076,8 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
} else { } else {
*reg = region->its_read(dev->kvm, its, addr, len); *reg = region->its_read(dev->kvm, its, addr, len);
} }
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm); unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->lock); mutex_unlock(&dev->kvm->lock);
return ret; return ret;
} }
@@ -2760,8 +2757,6 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
return -EBUSY; return -EBUSY;
} }
mutex_lock(&kvm->arch.config_lock);
switch (attr) { switch (attr) {
case KVM_DEV_ARM_ITS_CTRL_RESET: case KVM_DEV_ARM_ITS_CTRL_RESET:
vgic_its_reset(kvm, its); vgic_its_reset(kvm, its);
@@ -2774,7 +2769,6 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
break; break;
} }
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm); unlock_all_vcpus(kvm);
mutex_unlock(&its->its_lock); mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);

View File

@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
struct vgic_dist *vgic = &kvm->arch.vgic; struct vgic_dist *vgic = &kvm->arch.vgic;
int r; int r;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) { switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST: case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
r = -ENODEV; r = -ENODEV;
} }
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
return r; return r;
} }
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (get_user(addr, uaddr)) if (get_user(addr, uaddr))
return -EFAULT; return -EFAULT;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->lock);
switch (attr->attr) { switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST: case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
} }
out: out:
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->lock);
if (!r && !write) if (!r && !write)
r = put_user(addr, uaddr); r = put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
(val & 31)) (val & 31))
return -EINVAL; return -EINVAL;
mutex_lock(&dev->kvm->arch.config_lock); mutex_lock(&dev->kvm->lock);
if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis) if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
ret = -EBUSY; ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
dev->kvm->arch.vgic.nr_spis = dev->kvm->arch.vgic.nr_spis =
val - VGIC_NR_PRIVATE_IRQS; val - VGIC_NR_PRIVATE_IRQS;
mutex_unlock(&dev->kvm->arch.config_lock); mutex_unlock(&dev->kvm->lock);
return ret; return ret;
} }
case KVM_DEV_ARM_VGIC_GRP_CTRL: { case KVM_DEV_ARM_VGIC_GRP_CTRL: {
switch (attr->attr) { switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT: case KVM_DEV_ARM_VGIC_CTRL_INIT:
mutex_lock(&dev->kvm->arch.config_lock); mutex_lock(&dev->kvm->lock);
r = vgic_init(dev->kvm); r = vgic_init(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock); mutex_unlock(&dev->kvm->lock);
return r; return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES: case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/* /*
@@ -260,10 +260,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
mutex_unlock(&dev->kvm->lock); mutex_unlock(&dev->kvm->lock);
return -EBUSY; return -EBUSY;
} }
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm); r = vgic_v3_save_pending_tables(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm); unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock); mutex_unlock(&dev->kvm->lock);
return r; return r;
@@ -414,17 +411,15 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock); mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
ret = vgic_init(dev->kvm); ret = vgic_init(dev->kvm);
if (ret) if (ret)
goto out; goto out;
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
switch (attr->group) { switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val); ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -437,9 +432,8 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
break; break;
} }
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm); unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->lock); mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write) if (!ret && !is_write)
@@ -575,14 +569,12 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock); mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) { if (unlikely(!vgic_initialized(dev->kvm))) {
mutex_unlock(&dev->kvm->lock); ret = -EBUSY;
return -EBUSY; goto out;
} }
mutex_lock(&dev->kvm->arch.config_lock); if (!lock_all_vcpus(dev->kvm)) {
if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY; ret = -EBUSY;
goto out; goto out;
} }
@@ -617,9 +609,8 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
break; break;
} }
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm); unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->lock); mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) { if (!ret && uaccess && !is_write) {

View File

@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
case GICD_CTLR: { case GICD_CTLR: {
bool was_enabled, is_hwsgi; bool was_enabled, is_hwsgi;
mutex_lock(&vcpu->kvm->arch.config_lock); mutex_lock(&vcpu->kvm->lock);
was_enabled = dist->enabled; was_enabled = dist->enabled;
is_hwsgi = dist->nassgireq; is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
else if (!was_enabled && dist->enabled) else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm); vgic_kick_vcpus(vcpu->kvm);
mutex_unlock(&vcpu->kvm->arch.config_lock); mutex_unlock(&vcpu->kvm->lock);
break; break;
} }
case GICD_TYPER: case GICD_TYPER:

View File

@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 val; u32 val;
mutex_lock(&vcpu->kvm->arch.config_lock); mutex_lock(&vcpu->kvm->lock);
vgic_access_active_prepare(vcpu, intid); vgic_access_active_prepare(vcpu, intid);
val = __vgic_mmio_read_active(vcpu, addr, len); val = __vgic_mmio_read_active(vcpu, addr, len);
vgic_access_active_finish(vcpu, intid); vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->arch.config_lock); mutex_unlock(&vcpu->kvm->lock);
return val; return val;
} }
@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
{ {
u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->arch.config_lock); mutex_lock(&vcpu->kvm->lock);
vgic_access_active_prepare(vcpu, intid); vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_cactive(vcpu, addr, len, val); __vgic_mmio_write_cactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid); vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->arch.config_lock); mutex_unlock(&vcpu->kvm->lock);
} }
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu, int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
{ {
u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->arch.config_lock); mutex_lock(&vcpu->kvm->lock);
vgic_access_active_prepare(vcpu, intid); vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_sactive(vcpu, addr, len, val); __vgic_mmio_write_sactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid); vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->arch.config_lock); mutex_unlock(&vcpu->kvm->lock);
} }
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu, int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,

View File

@@ -232,8 +232,9 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
* @kvm: Pointer to the VM being initialized * @kvm: Pointer to the VM being initialized
* *
* We may be called each time a vITS is created, or when the * We may be called each time a vITS is created, or when the
* vgic is initialized. In both cases, the number of vcpus * vgic is initialized. This relies on kvm->lock to be
* should now be fixed. * held. In both cases, the number of vcpus should now be
* fixed.
*/ */
int vgic_v4_init(struct kvm *kvm) int vgic_v4_init(struct kvm *kvm)
{ {
@@ -242,8 +243,6 @@ int vgic_v4_init(struct kvm *kvm)
int nr_vcpus, ret; int nr_vcpus, ret;
unsigned long i; unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!kvm_vgic_global_state.has_gicv4) if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */ return 0; /* Nothing to see here... move along. */
@@ -310,14 +309,14 @@ int vgic_v4_init(struct kvm *kvm)
/** /**
* vgic_v4_teardown - Free the GICv4 data structures * vgic_v4_teardown - Free the GICv4 data structures
* @kvm: Pointer to the VM being destroyed * @kvm: Pointer to the VM being destroyed
*
* Relies on kvm->lock to be held.
*/ */
void vgic_v4_teardown(struct kvm *kvm) void vgic_v4_teardown(struct kvm *kvm)
{ {
struct its_vm *its_vm = &kvm->arch.vgic.its_vm; struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
int i; int i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!its_vm->vpes) if (!its_vm->vpes)
return; return;

View File

@@ -24,13 +24,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/* /*
* Locking order is always: * Locking order is always:
* kvm->lock (mutex) * kvm->lock (mutex)
* vcpu->mutex (mutex) * its->cmd_lock (mutex)
* kvm->arch.config_lock (mutex) * its->its_lock (mutex)
* its->cmd_lock (mutex) * vgic_cpu->ap_list_lock must be taken with IRQs disabled
* its->its_lock (mutex) * kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_cpu->ap_list_lock must be taken with IRQs disabled * vgic_irq->irq_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
* *
* As the ap_list_lock might be taken from the timer interrupt handler, * As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower * we have to disable IRQs before taking this lock and everything lower