Commit cf393f75 authored by Avi Kivity

KVM: Move NMI IRET fault processing to new vmx_complete_interrupts()

Currently most interrupt exit processing is handled on the entry path,
which is confusing.  Move the NMI IRET fault processing to a new function,
vmx_complete_interrupts(), which is called on the vmexit path.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 5b5c6a5a
@@ -2817,6 +2817,27 @@ static void enable_intr_window(struct kvm_vcpu *vcpu)
 		enable_irq_window(vcpu);
 }
 
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+	u32 exit_intr_info;
+	bool unblock_nmi;
+	u8 vector;
+
+	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	if (cpu_has_virtual_nmis()) {
+		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+		/*
+		 * SDM 3: 25.7.1.2
+		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
+		 * a guest IRET fault.
+		 */
+		if (unblock_nmi && vector != DF_VECTOR)
+			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+				      GUEST_INTR_STATE_NMI);
+	}
+}
+
 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2873,23 +2894,12 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 		return;
 	}
 	if (cpu_has_virtual_nmis()) {
-		/*
-		 * SDM 3: 25.7.1.2
-		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
-		 * a guest IRET fault.
-		 */
-		if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) &&
-		    (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8)
-			vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
-				vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) |
-				GUEST_INTR_STATE_NMI);
-		else if (vcpu->arch.nmi_pending) {
+		if (vcpu->arch.nmi_pending) {
 			if (vmx_nmi_enabled(vcpu))
 				vmx_inject_nmi(vcpu);
 			enable_intr_window(vcpu);
 			return;
 		}
-
 	}
 	if (!kvm_cpu_has_interrupt(vcpu))
 		return;
@@ -3076,6 +3086,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		KVMTRACE_0D(NMI, vcpu, handler);
 		asm("int $2");
 	}
+
+	vmx_complete_interrupts(vmx);
 }
 
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
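For readers less familiar with the VT-x detail this patch relies on, the following is a minimal, standalone sketch of the check that vmx_complete_interrupts() performs; it is not kernel code. complete_interrupts() and guest_interruptibility are stand-ins for the real function and the VMCS guest interruptibility-state field, and the constants follow the bit layout documented in the Intel SDM (bit 12 of the exit interruption information is "NMI unblocking due to IRET", bit 3 of the interruptibility state is "blocking by NMI").

/*
 * Standalone sketch (assumed names, stubbed VMCS access): models the
 * decision made by vmx_complete_interrupts() in the diff above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VECTOR_MASK   0xffu    /* bits 7:0 of VM_EXIT_INTR_INFO */
#define INTR_INFO_UNBLOCK_NMI   0x1000u  /* bit 12: NMI unblocking due to IRET */
#define GUEST_INTR_STATE_NMI    0x8u     /* interruptibility state: blocking by NMI */
#define DF_VECTOR               8u       /* double fault */

/* Stand-in for the GUEST_INTERRUPTIBILITY_INFO VMCS field. */
static uint32_t guest_interruptibility;

/*
 * SDM 3: 25.7.1.2 -- if the VM exit occurred while a guest IRET was in
 * the middle of unblocking NMIs, and the exit was not a double fault,
 * re-set "blocking by NMI" so the guest re-enters with NMIs still blocked.
 */
static void complete_interrupts(uint32_t exit_intr_info)
{
	bool unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
	uint8_t vector = exit_intr_info & INTR_INFO_VECTOR_MASK;

	if (unblock_nmi && vector != DF_VECTOR)
		guest_interruptibility |= GUEST_INTR_STATE_NMI;  /* vmcs_set_bits() */
}

int main(void)
{
	/* Example: exit with "NMI unblocked by IRET" set, vector 14 (#PF). */
	complete_interrupts(INTR_INFO_UNBLOCK_NMI | 14);
	printf("blocking-by-NMI re-set: %s\n",
	       (guest_interruptibility & GUEST_INTR_STATE_NMI) ? "yes" : "no");
	return 0;
}

The patch does not change this rule itself; it only moves where the check runs, so that it happens once on the vmexit path (at the end of vmx_vcpu_run()) instead of being folded into the interrupt-injection logic on the entry path.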