Commit b95234c8 authored by Paolo Bonzini

kvm: x86: do not use KVM_REQ_EVENT for APICv interrupt injection

Since bf9f6ac8 ("KVM: Update Posted-Interrupts Descriptor when vCPU
is blocked", 2015-09-18) the posted interrupt descriptor is checked
unconditionally for PIR.ON.  Therefore we don't need KVM_REQ_EVENT to
trigger the scan and, if NMIs or SMIs are not involved, we can avoid
the complicated event injection path.

Calling kvm_vcpu_kick if PIR.ON=1 is also useless, though it has been
there since APICv was introduced.

However, without the KVM_REQ_EVENT safety net KVM needs to be much
more careful about races between vmx_deliver_posted_interrupt and
vcpu_enter_guest.  First, the IPI for posted interrupts may be issued
between setting vcpu->mode = IN_GUEST_MODE and disabling interrupts.
If that happens, kvm_trigger_posted_interrupt returns true, but
smp_kvm_posted_intr_ipi doesn't do anything about it.  The guest is
entered with PIR.ON, but the posted interrupt IPI has not been sent
and the interrupt is only delivered to the guest on the next vmentry
(if any).  To fix this, disable interrupts before setting vcpu->mode.
This ensures that the IPI is delayed until the guest enters non-root mode;
it is then trapped by the processor causing the interrupt to be injected.
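
As a rough illustration of the two orderings, here is a user-space C sketch (stub functions and names are hypothetical stand-ins for local_irq_disable and the vCPU state, not KVM's code):

#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic int vcpu_mode = OUTSIDE_GUEST_MODE;
static bool irqs_off;				/* models the host IF flag */

static void local_irq_disable_stub(void)	{ irqs_off = true; }

/*
 * Old order: between the two statements the CPU can still take the
 * posted-interrupt IPI as a host interrupt; smp_kvm_posted_intr_ipi
 * is a no-op, so the notification is consumed without effect.
 */
static void enter_guest_old(void)
{
	atomic_store(&vcpu_mode, IN_GUEST_MODE);
	/* <-- an IPI taken here is lost until the next vmentry */
	local_irq_disable_stub();
}

/*
 * New order: with IRQs already off, the IPI stays pending and is only
 * taken once the CPU is in non-root mode, where it becomes virtual
 * interrupt delivery instead of a host interrupt.
 */
static void enter_guest_new(void)
{
	local_irq_disable_stub();
	atomic_store(&vcpu_mode, IN_GUEST_MODE);
}

int main(void)
{
	enter_guest_new();
	(void)enter_guest_old;		/* kept only for comparison */
	return 0;
}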

Second, the IPI may be issued between kvm_x86_ops->sync_pir_to_irr(vcpu)
and vcpu->mode = IN_GUEST_MODE.  In this case, kvm_vcpu_kick is called
but it (correctly) doesn't do anything because it sees vcpu->mode ==
OUTSIDE_GUEST_MODE.  Again, the guest is entered with PIR.ON but no
posted interrupt IPI is pending; this time, the fix for this is to move
the RVI update after IN_GUEST_MODE.
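
This fix relies on a classic store-buffering pattern: each side stores its flag before loading the other's. A compilable sketch with C11 sequentially consistent atomics standing in for the kernel's implicit barriers (names illustrative, not KVM's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool pir_on;		/* published by the sender */
static _Atomic bool in_guest_mode;	/* published by the vCPU thread */

/* Sender: set PIR.ON first, then inspect the mode. */
static bool sender_deliver(void)
{
	atomic_store(&pir_on, true);
	return atomic_load(&in_guest_mode);	/* true -> IPI, false -> kick */
}

/* vCPU: set the mode first, then inspect PIR.ON. */
static bool vcpu_enter(void)
{
	atomic_store(&in_guest_mode, true);
	return atomic_load(&pir_on);	/* true -> sync PIR to IRR before entry */
}

/*
 * With seq_cst ordering at least one of the two loads must observe the
 * other side's store, so the interrupt cannot fall through the crack:
 * either the sender sees IN_GUEST_MODE and sends the IPI, or the vCPU
 * sees PIR.ON while syncing PIR to IRR before vmentry.
 */
int main(void)
{
	printf("sender saw guest mode: %d\n", sender_deliver());
	printf("vcpu saw PIR.ON: %d\n", vcpu_enter());
	return 0;
}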

Both issues were mostly masked by the liberal usage of KVM_REQ_EVENT,
though the second could actually happen with VT-d posted interrupts.
In both race scenarios KVM_REQ_EVENT would cancel guest entry, resulting
in another vmentry which would inject the interrupt.

This saves about 300 cycles on the self_ipi_* tests of vmexit.flat.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 76dfafd5
@@ -385,12 +385,8 @@ EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
 int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	int max_irr;
 
-	max_irr = __kvm_apic_update_irr(pir, apic->regs);
-
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-	return max_irr;
+	return __kvm_apic_update_irr(pir, apic->regs);
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
@@ -423,9 +419,10 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	vcpu = apic->vcpu;
 	if (unlikely(vcpu->arch.apicv_active)) {
-		/* try to update RVI */
+		/* need to update RVI */
 		apic_clear_vector(vec, apic->regs + APIC_IRR);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_x86_ops->hwapic_irr_update(vcpu,
+				apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
 		apic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -5051,9 +5051,11 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
 		return;
 
-	r = pi_test_and_set_on(&vmx->pi_desc);
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
-	if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
+	/* If a previous notification has sent the IPI, nothing to do.  */
+	if (pi_test_and_set_on(&vmx->pi_desc))
+		return;
+
+	if (!kvm_vcpu_trigger_posted_interrupt(vcpu))
 		kvm_vcpu_kick(vcpu);
 }
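
The early return above works because pi_test_and_set_on is an atomic read-modify-write: only the first notifier observes ON clear and sends the IPI; later ones coalesce onto it. A user-space model of that logic, approximating the descriptor's ON bit with a C11 fetch-or (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PI_ON 1u		/* "outstanding notification" bit */

static _Atomic unsigned int pi_control;

/* Full-barrier RMW that reports whether ON was already set. */
static bool test_and_set_on(void)
{
	return atomic_fetch_or(&pi_control, PI_ON) & PI_ON;
}

static void notify(const char *who)
{
	/* the vector would already have been set in the PIR here */
	if (test_and_set_on()) {
		printf("%s: ON already set, IPI already on its way\n", who);
		return;
	}
	printf("%s: first notifier, send posted-interrupt IPI (or kick)\n", who);
}

int main(void)
{
	notify("irq A");	/* sends the IPI */
	notify("irq B");	/* coalesced onto the same notification */
	return 0;
}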
@@ -6813,19 +6813,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		kvm_hv_process_stimers(vcpu);
 	}
 
-	/*
-	 * KVM_REQ_EVENT is not set when posted interrupts are set by
-	 * VT-d hardware, so we have to update RVI unconditionally.
-	 */
-	if (kvm_lapic_enabled(vcpu)) {
-		/*
-		 * Update architecture specific hints for APIC
-		 * virtual interrupt delivery.
-		 */
-		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
-			kvm_x86_ops->sync_pir_to_irr(vcpu);
-	}
-
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
 		++vcpu->stat.req_event;
 		kvm_apic_accept_events(vcpu);
@@ -6870,20 +6857,39 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
+
+	/*
+	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
+	 * IPI are then delayed after guest entry, which ensures that they
+	 * result in virtual interrupt delivery.
+	 */
+	local_irq_disable();
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	/*
-	 * We should set ->mode before check ->requests,
-	 * Please see the comment in kvm_make_all_cpus_request.
-	 * This also orders the write to mode from any reads
-	 * to the page tables done while the VCPU is running.
-	 * Please see the comment in kvm_flush_remote_tlbs.
+	 * 1) We should set ->mode before checking ->requests.  Please see
+	 * the comment in kvm_make_all_cpus_request.
+	 *
+	 * 2) For APICv, we should set ->mode before checking PIR.ON.  This
+	 * pairs with the memory barrier implicit in pi_test_and_set_on
+	 * (see vmx_deliver_posted_interrupt).
+	 *
+	 * 3) This also orders the write to mode from any reads to the page
+	 * tables done while the VCPU is running.  Please see the comment
+	 * in kvm_flush_remote_tlbs.
 	 */
 	smp_mb__after_srcu_read_unlock();
 
-	local_irq_disable();
+	/*
+	 * This handles the case where a posted interrupt was
+	 * notified with kvm_vcpu_kick.
+	 */
+	if (kvm_lapic_enabled(vcpu)) {
+		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+			kvm_x86_ops->sync_pir_to_irr(vcpu);
+	}
 
 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
 	    || need_resched() || signal_pending(current)) {