Unverified commit 1661fbec authored by openeuler-ci-bot, committed by Gitee

!1617 LoongArch: KVM: fix vcpu timer

Merge Pull Request from: @ddjsaiwqq 
 
Fix the vcpu timer initialization, save, restore and trigger functions.
The previous code did not initialize the vcpu timer, and when the vcpu
timer fires, the expiry time must be calculated by adding the timer value
converted to time (nanoseconds) rather than the raw tick count.
 
 
Link: https://gitee.com/openeuler/kernel/pulls/1617

Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
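
The core of the fix is a unit conversion: the guest TVAL/TCFG registers hold ticks of the constant ("stable") timer, so the host hrtimer expiry has to be computed from those ticks converted to nanoseconds by the timer frequency, not from the raw tick count. A minimal standalone sketch of that scaling follows; it is not the kernel code. The names timer_mhz and MNSEC_PER_SEC mirror the diff below, the assumption that MNSEC_PER_SEC is NSEC_PER_SEC shifted by the same 2^20 factor as timer_mhz (timer_hz >> 20) is mine, and the 100 MHz frequency is illustrative only.

```c
/*
 * Standalone sketch (not the kernel code) of the tick -> ns scaling the
 * patch relies on.  The 2^20 scaling of MNSEC_PER_SEC and the example
 * frequency are assumptions made for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)	/* paired with timer_hz >> 20 */

static uint64_t tick_to_ns(uint64_t tick, uint64_t timer_mhz)
{
	/* guest stable-timer ticks -> host nanoseconds */
	return tick * MNSEC_PER_SEC / timer_mhz;
}

int main(void)
{
	uint64_t timer_hz  = 100000000ULL;	/* example: 100 MHz stable timer */
	uint64_t timer_mhz = timer_hz >> 20;
	uint64_t ticks     = 50000000ULL;	/* guest TVAL: ~0.5 s worth of ticks */

	/* hrtimer expiry should be now + tick_to_ns(ticks), not now + ticks */
	printf("%llu ticks -> %llu ns\n",
	       (unsigned long long)ticks,
	       (unsigned long long)tick_to_ns(ticks, timer_mhz));
	return 0;
}
```

With that conversion, kvm_save_timer() can turn the remaining guest ticks into an absolute ktime expiry for the pinned hrtimer, and kvm_restore_timer() can turn the remaining time (or the overshoot, for a periodic timer) back into ticks, as the hunks below show.
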
......@@ -220,16 +220,9 @@ struct kvm_vcpu_arch {
/* vcpu's vpid is different on each host cpu in an smp system */
u64 vpid[NR_CPUS];
/* Period of stable timer tick in ns */
u64 timer_period;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
/* Stable bias from the raw time */
u64 timer_bias;
/* Dynamic nanosecond bias (multiple of timer_period) to avoid overflow */
s64 timer_dyn_bias;
/* Save ktime */
ktime_t stable_ktime_saved;
ktime_t expire;
u64 core_ext_ioisr[4];
......
......@@ -101,10 +101,10 @@ void kvm_restore_hw_perf(struct kvm_vcpu *vcpu);
void kvm_acquire_timer(struct kvm_vcpu *vcpu);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
/*
* Loongarch KVM guest interrupt handling.
......
......@@ -499,20 +499,11 @@ static int _kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
return ret;
}
/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
kvm_vcpu_wake_up(vcpu);
return kvm_count_timeout(vcpu);
}
static void _kvm_vcpu_init(struct kvm_vcpu *vcpu)
{
int i;
unsigned long timer_hz;
struct loongarch_csrs *csr = vcpu->arch.csr;
for_each_possible_cpu(i)
vcpu->arch.vpid[i] = 0;
......@@ -522,6 +513,21 @@ static void _kvm_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
vcpu->arch.fpu_enabled = true;
vcpu->arch.lsx_enabled = true;
/*
* Initialize guest register state to valid architectural reset state.
*/
timer_hz = calc_const_freq();
kvm_init_timer(vcpu, timer_hz);
/* Set Initialize mode for GUEST */
kvm_write_sw_gcsr(csr, KVM_CSR_CRMD, KVM_CRMD_DA);
/* Set cpuid */
kvm_write_sw_gcsr(csr, KVM_CSR_TMID, vcpu->vcpu_id);
/* start with no pending virtual guest interrupts */
csr->csrs[KVM_CSR_GINTC] = 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
......@@ -1776,29 +1782,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return 0;
}
/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
unsigned long timer_hz;
/*
* Initialize guest register state to valid architectural reset state.
*/
timer_hz = calc_const_freq();
kvm_init_timer(vcpu, timer_hz);
/* Set Initialize mode for GUEST */
kvm_write_sw_gcsr(csr, KVM_CSR_CRMD, KVM_CRMD_DA);
/* Set cpuid */
kvm_write_sw_gcsr(csr, KVM_CSR_TMID, vcpu->vcpu_id);
/* start with no pending virtual guest interrupts */
csr->csrs[KVM_CSR_GINTC] = 0;
return 0;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
......
......@@ -19,256 +19,195 @@
#include <asm/inst.h>
#include "kvmcpu.h"
#include "trace.h"
#include "kvm_compat.h"
/*
* ktime_to_tick() - Scale ktime_t to a 64-bit stable timer.
*
* Caches the dynamic nanosecond bias in vcpu->arch.timer_dyn_bias.
* ktime_to_tick() - Scale ktime_t to timer tick value.
*/
static u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
s64 now_ns, periods;
u64 delta;
now_ns = ktime_to_ns(now);
delta = now_ns + vcpu->arch.timer_dyn_bias;
if (delta >= vcpu->arch.timer_period) {
/* If delta is out of safe range the bias needs adjusting */
periods = div64_s64(now_ns, vcpu->arch.timer_period);
vcpu->arch.timer_dyn_bias = -periods * vcpu->arch.timer_period;
/* Recalculate delta with new bias */
delta = now_ns + vcpu->arch.timer_dyn_bias;
}
/*
* We've ensured that:
* delta < timer_period
*/
delta = ktime_to_ns(now);
return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}
/**
* kvm_resume_hrtimer() - Resume hrtimer, updating expiry.
* @vcpu: Virtual CPU.
* @now: ktime at point of resume.
* @stable_timer: stable timer at point of resume.
*
* Resumes the timer and updates the timer expiry based on @now and @count.
*/
static void kvm_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now, u64 stable_timer)
static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
u64 delta;
ktime_t expire;
/* Stable timer decreased to zero or
* initialize to zero, set 4 second timer
*/
delta = div_u64(stable_timer * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
expire = ktime_add_ns(now, delta);
/* Update hrtimer to use new timeout */
hrtimer_cancel(&vcpu->arch.swtimer);
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
/**
* kvm_init_timer() - Initialise stable timer.
* @vcpu: Virtual CPU.
* @timer_hz: Frequency of timer.
*
* Initialise the timer to the specified frequency, zero it, and set it going if
* it's enabled.
/*
* Push timer forward on timeout.
* Handle an hrtimer event by pushing the hrtimer forward one period.
*/
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
{
ktime_t now;
unsigned long ticks;
struct loongarch_csrs *csr = vcpu->arch.csr;
unsigned long cfg, period;
vcpu->arch.timer_mhz = timer_hz >> 20;
vcpu->arch.timer_period = div_u64((u64)MNSEC_PER_SEC * IOCSR_TIMER_MASK, vcpu->arch.timer_mhz);
vcpu->arch.timer_dyn_bias = 0;
/* Add periodic tick to current expire time */
cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
if (cfg & CSR_TCFG_PERIOD) {
period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;
}
/* Starting at 0 */
ticks = 0;
now = ktime_get();
vcpu->arch.timer_bias = ticks - ktime_to_tick(vcpu, now);
vcpu->arch.timer_bias &= IOCSR_TIMER_MASK;
/* low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, ticks);
vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
rcuwait_wake_up(&vcpu->wait);
return kvm_count_timeout(vcpu);
}
/**
* kvm_count_timeout() - Push timer forward on timeout.
* @vcpu: Virtual CPU.
*
* Handle an hrtimer event by push the hrtimer forward a period.
*
* Returns: The hrtimer_restart value to return to the hrtimer subsystem.
/*
* Initialise the timer to the specified frequency, zero it
*/
enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
unsigned long timer_cfg;
vcpu->arch.timer_mhz = timer_hz >> 20;
/* Add the Count period to the current expiry time */
timer_cfg = kvm_read_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG);
if (timer_cfg & KVM_TCFG_PERIOD) {
hrtimer_add_expires_ns(&vcpu->arch.swtimer, timer_cfg & KVM_TCFG_VAL);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;
/* Starting at 0 */
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}
/*
* kvm_restore_timer() - Restore timer state.
* @vcpu: Virtual CPU.
*
* Restore soft timer state from saved context.
*/
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
ktime_t saved_ktime, now;
u64 stable_timer, new_timertick = 0;
u64 delta = 0;
int expired = 0;
unsigned long timer_cfg;
ktime_t expire, now;
unsigned long cfg, delta, period;
/*
* Set guest stable timer cfg csr
*/
timer_cfg = kvm_read_sw_gcsr(csr, KVM_CSR_TCFG);
kvm_restore_hw_gcsr(csr, KVM_CSR_ESTAT);
if (!(timer_cfg & KVM_TCFG_EN)) {
kvm_restore_hw_gcsr(csr, KVM_CSR_TCFG);
kvm_restore_hw_gcsr(csr, KVM_CSR_TVAL);
cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
if (!(cfg & CSR_TCFG_EN)) {
/* guest timer is disabled, just restore timer registers */
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
return;
}
/*
* set remainder tick value if not expired
*/
now = ktime_get();
saved_ktime = vcpu->arch.stable_ktime_saved;
stable_timer = kvm_read_sw_gcsr(csr, KVM_CSR_TVAL);
/*hrtimer not expire */
delta = ktime_to_tick(vcpu, ktime_sub(now, saved_ktime));
if (delta >= stable_timer)
expired = 1;
if (expired) {
if (timer_cfg & KVM_TCFG_PERIOD) {
new_timertick = (delta - stable_timer) % (timer_cfg & KVM_TCFG_VAL);
} else {
new_timertick = 1;
}
} else {
new_timertick = stable_timer - delta;
expire = vcpu->arch.expire;
if (ktime_before(now, expire))
delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
else {
if (cfg & CSR_TCFG_PERIOD) {
period = cfg & CSR_TCFG_VAL;
delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
delta = period - (delta % period);
} else
delta = 0;
/*
* inject the timer interrupt here even though the sw timer normally
* injects it asynchronously, since the sw timer may be cancelled in
* kvm_acquire_timer before the interrupt is injected
*/
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
}
new_timertick &= KVM_TCFG_VAL;
kvm_write_gcsr_timercfg(timer_cfg);
kvm_write_gcsr_timertick(new_timertick);
if (expired)
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
kvm_write_gcsr_timertick(delta);
}
/*
* kvm_acquire_timer() - Switch to hard timer state.
* @vcpu: Virtual CPU.
*
* Restore hard timer state on top of existing soft timer state if possible.
* Restore hard timer state and enable guest to access timer registers
* without trap
*
* Since hard timer won't remain active over preemption, preemption should be
* disabled by the caller.
* it is called with irq disabled
*/
void kvm_acquire_timer(struct kvm_vcpu *vcpu)
{
unsigned long flags, guestcfg;
unsigned long cfg;
guestcfg = kvm_read_csr_gcfg();
if (!(guestcfg & KVM_GCFG_TIT))
cfg = kvm_read_csr_gcfg();
if (!(cfg & CSR_GCFG_TIT))
return;
/* enable guest access to hard timer */
kvm_write_csr_gcfg(guestcfg & ~KVM_GCFG_TIT);
kvm_write_csr_gcfg(cfg & ~CSR_GCFG_TIT);
/*
* Freeze the soft-timer and sync the guest stable timer with it. We do
* this with interrupts disabled to avoid latency.
*/
local_irq_save(flags);
hrtimer_cancel(&vcpu->arch.swtimer);
local_irq_restore(flags);
}
/*
* _kvm_save_timer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
*
* Save guest timer state and switch to software emulation of guest
* timer. The hard timer must already be in use, so preemption should be
* disabled.
*/
static ktime_t _kvm_save_timer(struct kvm_vcpu *vcpu, u64 *stable_timer)
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
u64 end_stable_timer;
ktime_t before_time;
before_time = ktime_get();
/*
* Record a final stable timer which we will transfer to the soft-timer.
*/
end_stable_timer = kvm_read_gcsr_timertick();
*stable_timer = end_stable_timer;
unsigned long ticks, delta;
ktime_t expire;
struct loongarch_csrs *csr = vcpu->arch.csr;
kvm_resume_hrtimer(vcpu, before_time, end_stable_timer);
return before_time;
ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
delta = tick_to_ns(vcpu, ticks);
expire = ktime_add_ns(ktime_get(), delta);
vcpu->arch.expire = expire;
if (ticks) {
/*
* Update hrtimer to use new timeout
* HRTIMER_MODE_PINNED is suggested since the vcpu may run on
* the same physical cpu next time
*/
hrtimer_cancel(&vcpu->arch.swtimer);
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
} else
/*
* inject timer interrupt so that halt polling can detect
* and exit
*/
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
}
/*
* kvm_save_timer() - Save guest timer state.
* @vcpu: Virtual CPU.
*
* Save guest timer state and switch to soft guest timer if hard timer was in
* use.
*/
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
unsigned long guestcfg;
u64 stable_timer = 0;
ktime_t save_ktime;
unsigned long cfg;
preempt_disable();
guestcfg = kvm_read_csr_gcfg();
if (!(guestcfg & KVM_GCFG_TIT)) {
cfg = kvm_read_csr_gcfg();
if (!(cfg & CSR_GCFG_TIT)) {
/* disable guest use of hard timer */
kvm_write_csr_gcfg(guestcfg | KVM_GCFG_TIT);
kvm_write_csr_gcfg(cfg | CSR_GCFG_TIT);
/* save hard timer state */
kvm_save_hw_gcsr(csr, KVM_CSR_TCFG);
if (kvm_read_sw_gcsr(csr, KVM_CSR_TCFG) & KVM_TCFG_EN) {
save_ktime = _kvm_save_timer(vcpu, &stable_timer);
kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, stable_timer);
vcpu->arch.stable_ktime_saved = save_ktime;
if (stable_timer == IOCSR_TIMER_MASK)
_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
} else {
kvm_save_hw_gcsr(csr, KVM_CSR_TVAL);
}
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
_kvm_save_timer(vcpu);
}
/* save timer-related state to VCPU context */
kvm_save_hw_gcsr(csr, KVM_CSR_ESTAT);
/* save timer-related state to vCPU context */
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
preempt_enable();
}
void kvm_reset_timer(struct kvm_vcpu *vcpu)
{
kvm_write_gcsr_timercfg(0);
kvm_write_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG, 0);
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
hrtimer_cancel(&vcpu->arch.swtimer);
}