Commit a3d7f85f authored by Eddie Dong, committed by Avi Kivity

KVM: Migrate lapic hrtimer when vcpu moves to another cpu

This reduces the overhead of accessing cachelines from the wrong node, as well
as simplifying the locking.

[Qing: fix for inactive or expired one-shot timer]
Signed-off-by: Yaozu (Eddie) Dong <Eddie.Dong@intel.com>
Signed-off-by: Qing He <qing.he@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 1b9778da
@@ -158,5 +158,6 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
 #endif
@@ -979,3 +979,17 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	update_divide_count(apic);
 	start_apic_timer(apic);
 }
+
+void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->apic;
+	struct hrtimer *timer;
+
+	if (!apic)
+		return;
+
+	timer = &apic->timer.dev;
+	if (hrtimer_cancel(timer))
+		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
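
The bracketed note in the commit message ("fix for inactive or expired one-shot timer") refers to the hrtimer_cancel() test in the hunk above. Below is a commented restatement of the same helper, included only as a reading aid; the comments are not part of the commit:

void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->apic;
	struct hrtimer *timer;

	/* Nothing to migrate if this vcpu has no in-kernel local APIC. */
	if (!apic)
		return;

	timer = &apic->timer.dev;
	/*
	 * hrtimer_cancel() returns 1 only if the timer was still active
	 * (queued), so an inactive or already-expired one-shot timer is
	 * left alone rather than being re-armed.  Restarting the timer
	 * from the new CPU queues it on that CPU's hrtimer base, which is
	 * what avoids touching the old node's cachelines on every expiry.
	 */
	if (hrtimer_cancel(timer))
		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
}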
@@ -633,6 +633,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		delta = vcpu->host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
+		kvm_migrate_apic_timer(vcpu);
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
...
@@ -441,8 +441,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	u64 phys_addr = __pa(vmx->vmcs);
 	u64 tsc_this, delta;
 
-	if (vcpu->cpu != cpu)
+	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
+		kvm_migrate_apic_timer(vcpu);
+	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
...
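
Both call sites follow the same shape. The sketch below is a simplified, hypothetical restatement (example_vcpu_load() is not a real kernel function, and the arch-specific work such as the SVM TSC-offset adjustment and the VMX vcpu_clear() is elided): when vcpu_load finds the vCPU scheduled onto a different physical CPU than last time, the lapic hrtimer is migrated along with the rest of the per-CPU state.

/*
 * Hypothetical sketch, not kernel code: the shared shape of the svm and
 * vmx hunks above.
 */
static void example_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (vcpu->cpu != cpu) {
		/* ... arch-specific per-CPU state migration ... */
		vcpu->cpu = cpu;
		/* re-queue the lapic hrtimer on the CPU we are loading on */
		kvm_migrate_apic_timer(vcpu);
	}
}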