Unverified commit 1661fbec, authored by openeuler-ci-bot, committed by Gitee

!1617 LoongArch: KVM: fix vcpu timer

Merge Pull Request from: @ddjsaiwqq 
 
Fix the vcpu timer initialize, save, restore and trigger functions.
The previous code did not initialize the vcpu timer, and when the
vcpu timer triggers, the expire time must be calculated by adding a
time value (converted from ticks) rather than the raw tick count.
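To illustrate the expire-time point, below is a minimal standalone C sketch (userspace, not the kernel code in this patch): the remaining timer ticks are first scaled to nanoseconds and only then added to the current time, rather than adding the raw tick count. The constant timer frequency and all sample values are assumptions for the example.

/*
 * Illustrative userspace sketch (not kernel code): the fix converts the
 * remaining guest timer ticks to nanoseconds before adding them to the
 * current time to get the hrtimer expiry. The constant timer frequency
 * and the sample values below are assumptions for the example only.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t tick_to_ns(uint64_t ticks, uint64_t freq_hz)
{
	return ticks * NSEC_PER_SEC / freq_hz;	/* assumes no 64-bit overflow */
}

int main(void)
{
	uint64_t freq_hz = 100000000ULL;	/* assumed 100 MHz stable timer */
	uint64_t ticks   = 400000000ULL;	/* remaining guest timer ticks */
	uint64_t now_ns  = 5000000000ULL;	/* pretend current time, in ns */

	/* correct: scale ticks to time, then add => expires 4 s from "now" */
	uint64_t expire_ns = now_ns + tick_to_ns(ticks, freq_hz);

	/* buggy pattern the patch removes: adding raw ticks to a time value */
	uint64_t wrong_ns = now_ns + ticks;

	printf("expire=%llu ns, buggy expire=%llu ns\n",
	       (unsigned long long)expire_ns, (unsigned long long)wrong_ns);
	return 0;
}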
 
 
Link: https://gitee.com/openeuler/kernel/pulls/1617

Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
@@ -220,16 +220,9 @@ struct kvm_vcpu_arch {
 	/* vcpu's vpid is different on each host cpu in an smp system */
 	u64 vpid[NR_CPUS];
 
-	/* Period of stable timer tick in ns */
-	u64 timer_period;
 	/* Frequency of stable timer in Hz */
 	u64 timer_mhz;
-	/* Stable bias from the raw time */
-	u64 timer_bias;
-	/* Dynamic nanosecond bias (multiple of timer_period) to avoid overflow */
-	s64 timer_dyn_bias;
-	/* Save ktime */
-	ktime_t stable_ktime_saved;
+	ktime_t expire;
 
 	u64 core_ext_ioisr[4];
@@ -101,10 +101,10 @@ void kvm_restore_hw_perf(struct kvm_vcpu *vcpu);
 
 void kvm_acquire_timer(struct kvm_vcpu *vcpu);
 void kvm_reset_timer(struct kvm_vcpu *vcpu);
-enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
 void kvm_restore_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
 
 /*
  * Loongarch KVM guest interrupt handling.
@@ -499,20 +499,11 @@ static int _kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
-{
-	struct kvm_vcpu *vcpu;
-
-	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
-	_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
-	kvm_vcpu_wake_up(vcpu);
-
-	return kvm_count_timeout(vcpu);
-}
-
 static void _kvm_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int i;
+	unsigned long timer_hz;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
 
 	for_each_possible_cpu(i)
 		vcpu->arch.vpid[i] = 0;
@@ -522,6 +513,21 @@ static void _kvm_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
 	vcpu->arch.fpu_enabled = true;
 	vcpu->arch.lsx_enabled = true;
+
+	/*
+	 * Initialize guest register state to valid architectural reset state.
+	 */
+	timer_hz = calc_const_freq();
+	kvm_init_timer(vcpu, timer_hz);
+
+	/* Set Initialize mode for GUEST */
+	kvm_write_sw_gcsr(csr, KVM_CSR_CRMD, KVM_CRMD_DA);
+
+	/* Set cpuid */
+	kvm_write_sw_gcsr(csr, KVM_CSR_TMID, vcpu->vcpu_id);
+
+	/* start with no pending virtual guest interrupts */
+	csr->csrs[KVM_CSR_GINTC] = 0;
 }
 
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
@@ -1776,29 +1782,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	struct loongarch_csrs *csr = vcpu->arch.csr;
-	unsigned long timer_hz;
-
-	/*
-	 * Initialize guest register state to valid architectural reset state.
-	 */
-	timer_hz = calc_const_freq();
-	kvm_init_timer(vcpu, timer_hz);
-
-	/* Set Initialize mode for GUEST */
-	kvm_write_sw_gcsr(csr, KVM_CSR_CRMD, KVM_CRMD_DA);
-
-	/* Set cpuid */
-	kvm_write_sw_gcsr(csr, KVM_CSR_TMID, vcpu->vcpu_id);
-
-	/* start with no pending virtual guest interrupts */
-	csr->csrs[KVM_CSR_GINTC] = 0;
-
-	return 0;
-}
-
 /* Enable FPU for guest and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
@@ -19,256 +19,195 @@
 #include <asm/inst.h>
 #include "kvmcpu.h"
 #include "trace.h"
+#include "kvm_compat.h"
 
 /*
- * ktime_to_tick() - Scale ktime_t to a 64-bit stable timer.
- *
- * Caches the dynamic nanosecond bias in vcpu->arch.timer_dyn_bias.
+ * ktime_to_tick() - Scale ktime_t to timer tick value.
  */
-static u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
+static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
 {
-	s64 now_ns, periods;
 	u64 delta;
 
-	now_ns = ktime_to_ns(now);
-	delta = now_ns + vcpu->arch.timer_dyn_bias;
-	if (delta >= vcpu->arch.timer_period) {
-		/* If delta is out of safe range the bias needs adjusting */
-		periods = div64_s64(now_ns, vcpu->arch.timer_period);
-		vcpu->arch.timer_dyn_bias = -periods * vcpu->arch.timer_period;
-		/* Recalculate delta with new bias */
-		delta = now_ns + vcpu->arch.timer_dyn_bias;
-	}
-
-	/*
-	 * We've ensured that:
-	 * delta < timer_period
-	 */
+	delta = ktime_to_ns(now);
 	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
 }
 
-/**
- * kvm_resume_hrtimer() - Resume hrtimer, updating expiry.
- * @vcpu: Virtual CPU.
- * @now: ktime at point of resume.
- * @stable_timer: stable timer at point of resume.
- *
- * Resumes the timer and updates the timer expiry based on @now and @count.
- */
-static void kvm_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now, u64 stable_timer)
+static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
 {
-	u64 delta;
-	ktime_t expire;
-
-	/* Stable timer decreased to zero or
-	 * initialize to zero, set 4 second timer
-	 */
-	delta = div_u64(stable_timer * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
-	expire = ktime_add_ns(now, delta);
-
-	/* Update hrtimer to use new timeout */
-	hrtimer_cancel(&vcpu->arch.swtimer);
-	hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
 }
 
-/**
- * kvm_init_timer() - Initialise stable timer.
- * @vcpu: Virtual CPU.
- * @timer_hz: Frequency of timer.
- *
- * Initialise the timer to the specified frequency, zero it, and set it going if
- * it's enabled.
+/*
+ * Push timer forward on timeout.
+ * Handle an hrtimer event by push the hrtimer forward a period.
  */
-void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
+static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
 {
-	ktime_t now;
-	unsigned long ticks;
-	struct loongarch_csrs *csr = vcpu->arch.csr;
+	unsigned long cfg, period;
 
-	vcpu->arch.timer_mhz = timer_hz >> 20;
-	vcpu->arch.timer_period = div_u64((u64)MNSEC_PER_SEC * IOCSR_TIMER_MASK, vcpu->arch.timer_mhz);
-	vcpu->arch.timer_dyn_bias = 0;
+	/* Add periodic tick to current expire time */
+	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
+	if (cfg & CSR_TCFG_PERIOD) {
+		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
+		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
 
-	/* Starting at 0 */
-	ticks = 0;
-	now = ktime_get();
-	vcpu->arch.timer_bias = ticks - ktime_to_tick(vcpu, now);
-	vcpu->arch.timer_bias &= IOCSR_TIMER_MASK;
+/* low level hrtimer wake routine */
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
 
-	kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, ticks);
+	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
+	_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
+	rcuwait_wake_up(&vcpu->wait);
+
+	return kvm_count_timeout(vcpu);
 }
 
-/**
- * kvm_count_timeout() - Push timer forward on timeout.
- * @vcpu: Virtual CPU.
- *
- * Handle an hrtimer event by push the hrtimer forward a period.
- *
- * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
+/*
+ * Initialise the timer to the specified frequency, zero it
  */
-enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
 {
-	unsigned long timer_cfg;
+	vcpu->arch.timer_mhz = timer_hz >> 20;
 
-	/* Add the Count period to the current expiry time */
-	timer_cfg = kvm_read_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG);
-	if (timer_cfg & KVM_TCFG_PERIOD) {
-		hrtimer_add_expires_ns(&vcpu->arch.swtimer, timer_cfg & KVM_TCFG_VAL);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
+	/* Starting at 0 */
+	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
 }
 
 /*
- * kvm_restore_timer() - Restore timer state.
- * @vcpu: Virtual CPU.
- *
  * Restore soft timer state from saved context.
  */
 void kvm_restore_timer(struct kvm_vcpu *vcpu)
 {
 	struct loongarch_csrs *csr = vcpu->arch.csr;
-	ktime_t saved_ktime, now;
-	u64 stable_timer, new_timertick = 0;
-	u64 delta = 0;
-	int expired = 0;
-	unsigned long timer_cfg;
+	ktime_t expire, now;
+	unsigned long cfg, delta, period;
 
 	/*
 	 * Set guest stable timer cfg csr
 	 */
-	timer_cfg = kvm_read_sw_gcsr(csr, KVM_CSR_TCFG);
-	kvm_restore_hw_gcsr(csr, KVM_CSR_ESTAT);
-	if (!(timer_cfg & KVM_TCFG_EN)) {
-		kvm_restore_hw_gcsr(csr, KVM_CSR_TCFG);
-		kvm_restore_hw_gcsr(csr, KVM_CSR_TVAL);
+	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+	if (!(cfg & CSR_TCFG_EN)) {
+		/* guest timer is disabled, just restore timer registers */
+		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
 		return;
 	}
 
+	/*
+	 * set remainder tick value if not expired
+	 */
 	now = ktime_get();
-	saved_ktime = vcpu->arch.stable_ktime_saved;
-	stable_timer = kvm_read_sw_gcsr(csr, KVM_CSR_TVAL);
-
-	/*hrtimer not expire */
-	delta = ktime_to_tick(vcpu, ktime_sub(now, saved_ktime));
-	if (delta >= stable_timer)
-		expired = 1;
-
-	if (expired) {
-		if (timer_cfg & KVM_TCFG_PERIOD) {
-			new_timertick = (delta - stable_timer) % (timer_cfg & KVM_TCFG_VAL);
-		} else {
-			new_timertick = 1;
-		}
-	} else {
-		new_timertick = stable_timer - delta;
+	expire = vcpu->arch.expire;
+	if (ktime_before(now, expire))
+		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
+	else {
+		if (cfg & CSR_TCFG_PERIOD) {
+			period = cfg & CSR_TCFG_VAL;
+			delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
+			delta = period - (delta % period);
+		} else
+			delta = 0;
+		/*
+		 * inject timer here though sw timer should inject timer
+		 * interrupt async already, since sw timer may be cancelled
+		 * during injecting intr async in function kvm_acquire_timer
+		 */
+		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
 	}
 
-	new_timertick &= KVM_TCFG_VAL;
-	kvm_write_gcsr_timercfg(timer_cfg);
-	kvm_write_gcsr_timertick(new_timertick);
-
-	if (expired)
-		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
+	kvm_write_gcsr_timertick(delta);
 }
 
 /*
- * kvm_acquire_timer() - Switch to hard timer state.
- * @vcpu: Virtual CPU.
  *
- * Restore hard timer state on top of existing soft timer state if possible.
+ * Restore hard timer state and enable guest to access timer registers
+ * without trap
  *
- * Since hard timer won't remain active over preemption, preemption should be
- * disabled by the caller.
+ * it is called with irq disabled
  */
 void kvm_acquire_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned long flags, guestcfg;
+	unsigned long cfg;
 
-	guestcfg = kvm_read_csr_gcfg();
-	if (!(guestcfg & KVM_GCFG_TIT))
+	cfg = kvm_read_csr_gcfg();
+	if (!(cfg & CSR_GCFG_TIT))
 		return;
 
 	/* enable guest access to hard timer */
-	kvm_write_csr_gcfg(guestcfg & ~KVM_GCFG_TIT);
+	kvm_write_csr_gcfg(cfg & ~CSR_GCFG_TIT);
 
 	/*
 	 * Freeze the soft-timer and sync the guest stable timer with it. We do
 	 * this with interrupts disabled to avoid latency.
 	 */
-	local_irq_save(flags);
 	hrtimer_cancel(&vcpu->arch.swtimer);
-	local_irq_restore(flags);
 }
 
 /*
- * _kvm_save_timer() - Switch to software emulation of guest timer.
- * @vcpu: Virtual CPU.
- *
  * Save guest timer state and switch to software emulation of guest
  * timer. The hard timer must already be in use, so preemption should be
  * disabled.
  */
-static ktime_t _kvm_save_timer(struct kvm_vcpu *vcpu, u64 *stable_timer)
+static void _kvm_save_timer(struct kvm_vcpu *vcpu)
 {
-	u64 end_stable_timer;
-	ktime_t before_time;
-
-	before_time = ktime_get();
-
-	/*
-	 * Record a final stable timer which we will transfer to the soft-timer.
-	 */
-	end_stable_timer = kvm_read_gcsr_timertick();
-	*stable_timer = end_stable_timer;
+	unsigned long ticks, delta;
+	ktime_t expire;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
 
-	kvm_resume_hrtimer(vcpu, before_time, end_stable_timer);
-	return before_time;
+	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+	delta = tick_to_ns(vcpu, ticks);
+	expire = ktime_add_ns(ktime_get(), delta);
+	vcpu->arch.expire = expire;
+	if (ticks) {
+		/*
+		 * Update hrtimer to use new timeout
+		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
+		 * the same physical cpu in next time
+		 */
+		hrtimer_cancel(&vcpu->arch.swtimer);
+		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+	} else
+		/*
+		 * inject timer interrupt so that hall polling can dectect
+		 * and exit
+		 */
+		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
 }
 
 /*
- * kvm_save_timer() - Save guest timer state.
- * @vcpu: Virtual CPU.
- *
  * Save guest timer state and switch to soft guest timer if hard timer was in
  * use.
  */
 void kvm_save_timer(struct kvm_vcpu *vcpu)
 {
 	struct loongarch_csrs *csr = vcpu->arch.csr;
-	unsigned long guestcfg;
-	u64 stable_timer = 0;
-	ktime_t save_ktime;
+	unsigned long cfg;
 
 	preempt_disable();
-	guestcfg = kvm_read_csr_gcfg();
-	if (!(guestcfg & KVM_GCFG_TIT)) {
+	cfg = kvm_read_csr_gcfg();
+	if (!(cfg & CSR_GCFG_TIT)) {
 		/* disable guest use of hard timer */
-		kvm_write_csr_gcfg(guestcfg | KVM_GCFG_TIT);
+		kvm_write_csr_gcfg(cfg | CSR_GCFG_TIT);
 
 		/* save hard timer state */
-		kvm_save_hw_gcsr(csr, KVM_CSR_TCFG);
-		if (kvm_read_sw_gcsr(csr, KVM_CSR_TCFG) & KVM_TCFG_EN) {
-			save_ktime = _kvm_save_timer(vcpu, &stable_timer);
-			kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, stable_timer);
-			vcpu->arch.stable_ktime_saved = save_ktime;
-			if (stable_timer == IOCSR_TIMER_MASK)
-				_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
-		} else {
-			kvm_save_hw_gcsr(csr, KVM_CSR_TVAL);
-		}
+		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+		if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
+			_kvm_save_timer(vcpu);
 	}
 
-	/* save timer-related state to VCPU context */
-	kvm_save_hw_gcsr(csr, KVM_CSR_ESTAT);
+	/* save timer-related state to vCPU context */
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	preempt_enable();
 }
 
 void kvm_reset_timer(struct kvm_vcpu *vcpu)
 {
 	kvm_write_gcsr_timercfg(0);
-	kvm_write_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG, 0);
+	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
 	hrtimer_cancel(&vcpu->arch.swtimer);
 }
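As a side note on the periodic case handled in the new kvm_restore_timer() above: when the saved expiry has already passed, the remaining count is the distance to the next period boundary. A minimal standalone sketch of that arithmetic, with all values assumed for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed example values, in guest timer ticks */
	uint64_t period  = 1000;	/* CSR.TCFG initial count */
	uint64_t elapsed = 2300;	/* ticks elapsed since the missed expiry */

	/* ticks left until the next periodic expiry, as in the patched code:
	 * delta = period - (delta % period)
	 */
	uint64_t remaining = period - (elapsed % period);

	printf("remaining=%llu ticks\n", (unsigned long long)remaining);
	return 0;
}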