diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ec14b7245a4ecd5a951364c63d452f87859ebf84..3741c653767ca958d0cc56f91d4807a4db7440b7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -695,7 +695,7 @@ struct kvm_x86_ops {
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
-	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+	int (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	int (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
 	int (*vm_has_apicv)(struct kvm *kvm);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7f896cbe717f933ffd5f1d8be8470442823421fc..3421d5a92c06f6de63da263e7e0c1877f49cd10a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3649,13 +3649,13 @@ static int enable_irq_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static int enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
 	    == HF_NMI_MASK)
-		return; /* IRET will cause a vm exit */
+		return 0; /* IRET will cause a vm exit */
 
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
@@ -3664,6 +3664,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 	update_db_bp_intercept(vcpu);
+	return 0;
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e10217e99d2c1f395fe1eef98b32c282ac81be69..e53a5f7aa8a962b844f56f1c53f16b883be6f024 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4417,22 +4417,20 @@ static int enable_irq_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static int enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
-	if (!cpu_has_virtual_nmis()) {
-		enable_irq_window(vcpu);
-		return;
-	}
+	if (!cpu_has_virtual_nmis())
+		return enable_irq_window(vcpu);
+
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI)
+		return enable_irq_window(vcpu);
 
-	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
-		enable_irq_window(vcpu);
-		return;
-	}
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	return 0;
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 96f914e828d4832921a9e4b9b7cdd0fabd3dd870..94f35d22c5fbbaa3e6a72018bf2fd4b9e4efdec0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5756,7 +5756,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 		/* enable NMI/IRQ window open exits if needed */
 		if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
+			req_immediate_exit =
+				kvm_x86_ops->enable_nmi_window(vcpu) != 0;
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
 			req_immediate_exit =
 				kvm_x86_ops->enable_irq_window(vcpu) != 0;
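
For illustration only, not part of the patch itself: after this change a vendor enable_nmi_window() implementation returns 0 when it has armed an NMI-window exit on its own, and nonzero when the generic code in vcpu_enter_guest() should force an immediate VM exit instead, mirroring the contract enable_irq_window() already follows. The compilable userspace sketch below only models that contract; every type and function in it (fake_vcpu, fake_ops, backend_can_arm, backend_needs_immediate_exit, enter_guest) is a hypothetical stand-in, not a KVM API.

/* Illustrative userspace model of the enable_nmi_window() return contract.
 * None of these types exist in KVM; they only mirror the shape of the change.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
	bool nmi_pending;
	bool nmi_window_armed;	/* stands in for a real NMI-window intercept */
};

struct fake_ops {
	/* After the patch: int instead of void, nonzero => immediate exit. */
	int (*enable_nmi_window)(struct fake_vcpu *vcpu);
};

/* Backend that can arm an NMI-window exit itself: returns 0. */
static int backend_can_arm(struct fake_vcpu *vcpu)
{
	vcpu->nmi_window_armed = true;
	return 0;
}

/* Backend that cannot arm the window right now: nonzero asks the common
 * code to exit to the outer loop immediately.
 */
static int backend_needs_immediate_exit(struct fake_vcpu *vcpu)
{
	(void)vcpu;
	return 1;
}

/* Mirrors the hunk in vcpu_enter_guest(): translate the return value
 * into a request for an immediate exit.
 */
static bool enter_guest(struct fake_vcpu *vcpu, const struct fake_ops *ops)
{
	bool req_immediate_exit = false;

	if (vcpu->nmi_pending)
		req_immediate_exit = ops->enable_nmi_window(vcpu) != 0;

	return req_immediate_exit;
}

int main(void)
{
	struct fake_vcpu vcpu = { .nmi_pending = true };
	struct fake_ops armed = { .enable_nmi_window = backend_can_arm };
	struct fake_ops deferred = { .enable_nmi_window = backend_needs_immediate_exit };

	printf("armed backend    -> immediate exit requested: %d\n",
	       enter_guest(&vcpu, &armed));
	printf("deferred backend -> immediate exit requested: %d\n",
	       enter_guest(&vcpu, &deferred));
	return 0;
}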