Commit 6addfc42 authored by Paolo Bonzini

KVM: x86: avoid useless set of KVM_REQ_EVENT after emulation

Despite the provisions to emulate up to 130 consecutive instructions, in
practice KVM will emulate just one before exiting handle_invalid_guest_state,
because x86_emulate_instruction always sets KVM_REQ_EVENT.

However, we only need to do this if an interrupt could be injected,
which happens a) if an interrupt shadow bit (STI or MOV SS) has gone
away; b) if the interrupt flag has just been set (instructions other than
STI can set it without enabling an interrupt shadow).
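
To make the two cases concrete, here is a minimal user-space sketch, not kernel code: case (a) is a shadow that was set and is now gone, and case (b) is the expression (new_eflags & ~old_eflags) & X86_EFLAGS_IF from the patch, which is non-zero only when IF goes from 0 to 1. The helper name need_event_check and its parameters are made up for this illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_EFLAGS_IF (1UL << 9)   /* interrupt enable flag, bit 9 of EFLAGS */

    /*
     * Hypothetical helper mirroring the reasoning above: an event re-check
     * is only needed when an interrupt could newly be injected.
     */
    static bool need_event_check(unsigned long old_eflags, unsigned long new_eflags,
                                 bool shadow_was_set, bool shadow_is_set)
    {
            /* a) an interrupt shadow (STI / MOV SS) has just gone away */
            if (shadow_was_set && !shadow_is_set)
                    return true;

            /* b) IF went from 0 to 1, e.g. via POPF, which raises no shadow */
            return ((new_eflags & ~old_eflags) & X86_EFLAGS_IF) != 0;
    }

    int main(void)
    {
            /* POPF that sets IF: re-check needed (prints 1) */
            printf("%d\n", need_event_check(0, X86_EFLAGS_IF, false, false));
            /* an instruction that leaves the flags alone: no re-check (prints 0) */
            printf("%d\n", need_event_check(0, 0, false, false));
            return 0;
    }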

This cuts another 700-900 cycles from the cost of emulating an
instruction (measured on a Sandy Bridge Xeon: 1650-2600 cycles
before the patch on kvm-unit-tests, 925-1700 afterwards).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 37ccdcbe
@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -4868,8 +4869,11 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
         */
        if (int_shadow & mask)
                mask = 0;
-       if (unlikely(int_shadow || mask))
+       if (unlikely(int_shadow || mask)) {
                kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+               if (!mask)
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       }
 }
 
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
@@ -5095,20 +5099,18 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
        return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
        /*
-        * Use the "raw" value to see if TF was passed to the processor.
-        * Note that the new value of the flags has not been saved yet.
+        * rflags is the old, "raw" value of the flags.  The new value has
+        * not been saved yet.
         *
         * This is correct even for TF set by the guest, because "the
         * processor will not generate this exception after the instruction
         * that sets the TF flag".
         */
-       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
-
        if (unlikely(rflags & X86_EFLAGS_TF)) {
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                        kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
@@ -5275,13 +5277,22 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                r = EMULATE_DONE;
 
        if (writeback) {
+               unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
                toggle_interruptibility(vcpu, ctxt->interruptibility);
-               kvm_make_request(KVM_REQ_EVENT, vcpu);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                kvm_rip_write(vcpu, ctxt->eip);
                if (r == EMULATE_DONE)
-                       kvm_vcpu_check_singlestep(vcpu, &r);
-               kvm_set_rflags(vcpu, ctxt->eflags);
+                       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+               __kvm_set_rflags(vcpu, ctxt->eflags);
+
+               /*
+                * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+                * do nothing, and it will be requested again as soon as
+                * the shadow expires.  But we still need to check here,
+                * because POPF has no interrupt shadow.
+                */
+               if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else
                vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
 
@@ -7406,12 +7417,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
-void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
            kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
                rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+       __kvm_set_rflags(vcpu, rflags);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);