KVM: VMX: refactor/fix IRQ and NMI injectability determination
There are currently two ways in VMX to check if an IRQ or NMI can be
injected:
 - vmx_{nmi|irq}_enabled and
 - vcpu.arch.{nmi|interrupt}_window_open.
Even worse, one test (at the end of vmx_vcpu_run) uses an inconsistent,
likely incorrect logic.

This patch consolidates and unifies the tests over
{nmi|interrupt}_window_open as cache + vmx_update_window_states
for updating the cache content.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
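For orientation, below is a minimal, self-contained sketch of the pattern the patch introduces: the injectability conditions are recomputed by one helper at the relevant points and cached in the vcpu state, and every consumer then reads the cached flags instead of re-deriving them from the VMCS. The types, stubbed vmcs_* readers and bit values are simplified stand-ins for illustration, not the kernel's definitions; see the actual diff below for the real change.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the VMX interruptibility bits (illustrative values). */
    #define GUEST_INTR_STATE_STI     0x1u
    #define GUEST_INTR_STATE_MOV_SS  0x2u
    #define GUEST_INTR_STATE_NMI     0x8u
    #define X86_EFLAGS_IF            0x200ul

    /* Toy vcpu state: only the fields needed to show the caching scheme. */
    struct vcpu_arch {
            bool nmi_window_open;
            bool interrupt_window_open;
    };

    /* Stubbed VMCS reads; the real code reads these fields from hardware. */
    static uint32_t vmcs_read32_interruptibility(void) { return GUEST_INTR_STATE_MOV_SS; }
    static unsigned long vmcs_readl_rflags(void)       { return X86_EFLAGS_IF; }

    /*
     * Single point of truth: recompute both cached window states from the
     * current guest state, mirroring what vmx_update_window_states() does.
     */
    static void update_window_states(struct vcpu_arch *arch)
    {
            uint32_t guest_intr = vmcs_read32_interruptibility();

            arch->nmi_window_open =
                    !(guest_intr & (GUEST_INTR_STATE_STI |
                                    GUEST_INTR_STATE_MOV_SS |
                                    GUEST_INTR_STATE_NMI));

            arch->interrupt_window_open =
                    (vmcs_readl_rflags() & X86_EFLAGS_IF) &&
                    !(guest_intr & (GUEST_INTR_STATE_STI |
                                    GUEST_INTR_STATE_MOV_SS));
    }

    int main(void)
    {
            struct vcpu_arch arch;

            /* Callers refresh the cache once, then test the cached flags. */
            update_window_states(&arch);
            printf("NMI window open: %d, IRQ window open: %d\n",
                   arch.nmi_window_open, arch.interrupt_window_open);
            return 0;
    }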
@@ -327,6 +327,7 @@ struct kvm_vcpu_arch {
 
 	bool nmi_pending;
 	bool nmi_injected;
+	bool nmi_window_open;
 
 	u64 mtrr[0x100];
 };
@@ -2362,6 +2362,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
 }
 
+static void vmx_update_window_states(struct kvm_vcpu *vcpu)
+{
+	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+
+	vcpu->arch.nmi_window_open =
+		!(guest_intr & (GUEST_INTR_STATE_STI |
+				GUEST_INTR_STATE_MOV_SS |
+				GUEST_INTR_STATE_NMI));
+
+	vcpu->arch.interrupt_window_open =
+		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+		 !(guest_intr & (GUEST_INTR_STATE_STI |
+				 GUEST_INTR_STATE_MOV_SS)));
+}
+
 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
 	int word_index = __ffs(vcpu->arch.irq_summary);
@@ -2374,15 +2389,12 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 	kvm_queue_interrupt(vcpu, irq);
 }
 
-
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 				       struct kvm_run *kvm_run)
 {
 	u32 cpu_based_vm_exec_control;
 
-	vcpu->arch.interrupt_window_open =
-		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
-		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+	vmx_update_window_states(vcpu);
 
 	if (vcpu->arch.interrupt_window_open &&
 	    vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
@@ -3075,22 +3087,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
-static int vmx_nmi_enabled(struct kvm_vcpu *vcpu)
-{
-	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-	return !(guest_intr & (GUEST_INTR_STATE_NMI |
-			       GUEST_INTR_STATE_MOV_SS |
-			       GUEST_INTR_STATE_STI));
-}
-
-static int vmx_irq_enabled(struct kvm_vcpu *vcpu)
-{
-	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-	return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS |
-			       GUEST_INTR_STATE_STI)) &&
-		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
-}
-
 static void enable_intr_window(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.nmi_pending)
@@ -3159,11 +3155,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
 	update_tpr_threshold(vcpu);
 
+	vmx_update_window_states(vcpu);
+
 	if (cpu_has_virtual_nmis()) {
 		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
 			if (vcpu->arch.interrupt.pending) {
 				enable_nmi_window(vcpu);
-			} else if (vmx_nmi_enabled(vcpu)) {
+			} else if (vcpu->arch.nmi_window_open) {
 				vcpu->arch.nmi_pending = false;
 				vcpu->arch.nmi_injected = true;
 			} else {
@@ -3178,7 +3176,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 		}
 	}
 	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
-		if (vmx_irq_enabled(vcpu))
+		if (vcpu->arch.interrupt_window_open)
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
 		else
 			enable_irq_window(vcpu);
@@ -3339,9 +3337,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
 
-	vcpu->arch.interrupt_window_open =
-		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-		 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;
+	vmx_update_window_states(vcpu);
 
 	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;