Commit 4ba76538 authored by Haozhong Zhang, committed by Paolo Bonzini

KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()

Both VMX and SVM scale the host TSC in the same way in their read_l1_tsc()
call-backs, so this patch moves the scaling logic out of the call-backs and
into a common function, kvm_read_l1_tsc().
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 58ea6767
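For illustration, here is a minimal, self-contained C sketch of the refactoring pattern the diff below applies. All names here (struct vcpu, scale_tsc, vendor_read_l1_tsc, read_l1_tsc) are hypothetical stand-ins for kvm_scale_tsc(), the kvm_x86_ops->read_l1_tsc() call-back, and the new kvm_read_l1_tsc() wrapper, and the 16-bit fixed-point ratio is chosen only to keep the example small. The point is the shape of the change: the scaling step is hoisted out of the vendor call-backs into one common wrapper, so each call-back only adds its per-guest TSC offset.

/* refactor_sketch.c -- illustrative only; compile with: cc refactor_sketch.c */
#include <stdint.h>
#include <stdio.h>

struct vcpu {
	uint64_t tsc_offset;    /* per-guest offset (the VMCS/VMCB field) */
	uint64_t tsc_ratio;     /* guest/host frequency ratio, 16 fractional bits */
};

/* The shared scaling step; before the patch this multiply was duplicated
 * inside both the VMX and the SVM call-back. */
static uint64_t scale_tsc(const struct vcpu *v, uint64_t tsc)
{
	return (tsc * v->tsc_ratio) >> 16;
}

/* Vendor call-back after the refactor: add the offset, nothing more.
 * It trusts the caller to have scaled host_tsc already. */
static uint64_t vendor_read_l1_tsc(const struct vcpu *v, uint64_t host_tsc)
{
	return v->tsc_offset + host_tsc;
}

/* Common wrapper, playing the role of the new kvm_read_l1_tsc():
 * scale exactly once, then delegate to the vendor call-back. */
static uint64_t read_l1_tsc(const struct vcpu *v, uint64_t host_tsc)
{
	return vendor_read_l1_tsc(v, scale_tsc(v, host_tsc));
}

int main(void)
{
	/* A guest running at half the host TSC rate: ratio 0.5 in
	 * 16-bit fixed point, plus an offset of 1000 cycles. */
	struct vcpu v = { .tsc_offset = 1000, .tsc_ratio = 1ull << 15 };

	/* 200000 * 0.5 + 1000 = 101000 */
	printf("guest tsc = %llu\n", (unsigned long long)read_l1_tsc(&v, 200000));
	return 0;
}

Because the wrapper now scales before invoking the call-back, a vendor call-back must not scale again; that is why the SVM hunk below drops its kvm_scale_tsc() call.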
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1226,6 +1226,7 @@ void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1250,7 +1250,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
-	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1318,7 +1318,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		local_irq_save(flags);
 
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+		guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2984,8 +2984,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
 	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-	return vmcb->control.tsc_offset +
-		kvm_scale_tsc(vcpu, host_tsc);
+	return vmcb->control.tsc_offset + host_tsc;
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1401,6 +1401,12 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 	return target_tsc - tsc;
 }
 
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+{
+	return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+}
+EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1738,7 +1744,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 			kernel_ns = get_kernel_ns();
 	}
 
-	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
 
 	/*
 	 * We may have to catch up the TSC to match elapsed wall clock
@@ -6545,8 +6551,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (hw_breakpoint_active())
 		hw_breakpoint_restore();
 
-	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							   rdtsc());
+	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();