KVM: arm/arm64: Simplify bg_timer programming
Instead of calling into kvm_timer_[un]schedule from the main kvm
blocking path, test if the VCPU is on the wait queue from the load/put
path and perform the background timer setup/cancel in this path.

This has the distinct advantage that we no longer race between load/put
and schedule/unschedule, and programming and canceling of the bg_timer
always happens when the timer state is not loaded.

Note that we must now remove the checks in kvm_timer_blocking that do
not schedule a background timer if one of the timers can fire, because
we no longer have a guarantee that kvm_vcpu_check_block() will be
called before kvm_timer_blocking.

Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
commit accb99bcd0
parent e329fb75d5
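To make the flow easier to follow before reading the diff, here is a minimal, self-contained userspace model (not kernel code) of the new arrangement: the background timer is armed from the put path only when the VCPU is actually sitting on its wait queue, and cancelled again from the load path. All types and helpers below (struct soft_timer, timer_vcpu_put(), earliest_exp_ns, ...) are illustrative stand-ins, not the kernel's APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct soft_timer { bool armed; uint64_t expires; };

struct vcpu {
        bool on_waitqueue;          /* models swait_active(kvm_arch_vcpu_wq()) */
        struct soft_timer bg_timer; /* models timer->bg_timer */
};

static void soft_timer_start(struct soft_timer *t, uint64_t ns)
{
        t->armed = true;
        t->expires = ns;
}

static void soft_timer_cancel(struct soft_timer *t)
{
        t->armed = false;
}

/* Arm the background wakeup; only ever called when the timer state is not loaded. */
static void timer_blocking(struct vcpu *v, uint64_t earliest_exp_ns)
{
        soft_timer_start(&v->bg_timer, earliest_exp_ns);
}

static void timer_unblocking(struct vcpu *v)
{
        soft_timer_cancel(&v->bg_timer);
}

/* put path: schedule the wakeup only if the VCPU is really blocked. */
static void timer_vcpu_put(struct vcpu *v, uint64_t earliest_exp_ns)
{
        if (v->on_waitqueue)
                timer_blocking(v, earliest_exp_ns);
}

/* load path: the hardware timer takes over again. */
static void timer_vcpu_load(struct vcpu *v)
{
        timer_unblocking(v);
}

int main(void)
{
        struct vcpu v = { .on_waitqueue = true };

        timer_vcpu_put(&v, 1000000);   /* blocked: bg_timer armed */
        printf("after put:  armed=%d\n", v.bg_timer.armed);

        v.on_waitqueue = false;
        timer_vcpu_load(&v);           /* running again: bg_timer cancelled */
        printf("after load: armed=%d\n", v.bg_timer.armed);
        return 0;
}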
@@ -76,9 +76,6 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 
 bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
 
-void kvm_timer_schedule(struct kvm_vcpu *vcpu);
-void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
-
 u64 kvm_phys_timer_read(void);
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -349,22 +349,12 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
  * thread is removed from its waitqueue and made runnable when there's a timer
  * interrupt to handle.
  */
-void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
-        vtimer_save_state(vcpu);
-
-        /*
-         * No need to schedule a background timer if any guest timer has
-         * already expired, because kvm_vcpu_block will return before putting
-         * the thread to sleep.
-         */
-        if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
-                return;
-
         /*
          * If both timers are not capable of raising interrupts (disabled or
          * masked), then there's no more work for us to do.
@@ -373,12 +363,19 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
                 return;
 
         /*
-         * The guest timers have not yet expired, schedule a background timer.
+         * At least one guest timer will expire. Schedule a background timer.
          * Set the earliest expiration time among the guest timers.
          */
         soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
 }
 
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+        soft_timer_cancel(&timer->bg_timer);
+}
+
 static void vtimer_restore_state(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -401,15 +398,6 @@ static void vtimer_restore_state(struct kvm_vcpu *vcpu)
         local_irq_restore(flags);
 }
 
-void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
-{
-        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-        vtimer_restore_state(vcpu);
-
-        soft_timer_cancel(&timer->bg_timer);
-}
-
 static void set_cntvoff(u64 cntvoff)
 {
         u32 low = lower_32_bits(cntvoff);
@@ -485,6 +473,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
         /* Set the background timer for the physical timer emulation. */
         phys_timer_emulate(vcpu);
 
+        kvm_timer_unblocking(vcpu);
+
         /* If the timer fired while we weren't running, inject it now */
         if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
                 kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
@@ -527,6 +517,9 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
          */
         soft_timer_cancel(&timer->phys_timer);
 
+        if (swait_active(kvm_arch_vcpu_wq(vcpu)))
+                kvm_timer_blocking(vcpu);
+
         /*
          * The kernel may decide to run userspace after calling vcpu_put, so
          * we reset cntvoff to 0 to ensure a consistent read between user
@@ -335,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-        kvm_timer_schedule(vcpu);
         kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-        kvm_timer_unschedule(vcpu);
         kvm_vgic_v4_disable_doorbell(vcpu);
 }
 