提交 c7e3ba64 编写于 作者: M Marc Zyngier

ARM: KVM: arch_timers: Add timer world switch

Do the necessary save/restore dance for the timers in the world
switch code. In the process, allow the guest to read the physical
counter, which is useful for its own clock_event_device.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
上级 53e72406
...@@ -45,7 +45,8 @@ ...@@ -45,7 +45,8 @@
#define c13_TID_URW 23 /* Thread ID, User R/W */ #define c13_TID_URW 23 /* Thread ID, User R/W */
#define c13_TID_URO 24 /* Thread ID, User R/O */ #define c13_TID_URO 24 /* Thread ID, User R/O */
#define c13_TID_PRIV 25 /* Thread ID, Privileged */ #define c13_TID_PRIV 25 /* Thread ID, Privileged */
#define NR_CP15_REGS 26 /* Number of regs (incl. invalid) */ #define c14_CNTKCTL 26 /* Timer Control Register (PL1) */
#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */
#define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1 #define ARM_EXCEPTION_UNDEFINED 1
......
...@@ -179,6 +179,12 @@ int main(void) ...@@ -179,6 +179,12 @@ int main(void)
DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif #endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
......
...@@ -718,6 +718,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -718,6 +718,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_pause(vcpu); vcpu_pause(vcpu);
kvm_vgic_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
local_irq_disable(); local_irq_disable();
...@@ -731,6 +732,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -731,6 +732,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
local_irq_enable(); local_irq_enable();
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu);
continue; continue;
} }
...@@ -764,6 +766,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -764,6 +766,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Back from guest * Back from guest
*************************************************************/ *************************************************************/
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu);
ret = handle_exit(vcpu, run, ret); ret = handle_exit(vcpu, run, ret);
......
...@@ -222,6 +222,10 @@ static const struct coproc_reg cp15_regs[] = { ...@@ -222,6 +222,10 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c13_TID_URO }, NULL, reset_unknown, c13_TID_URO },
{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
NULL, reset_unknown, c13_TID_PRIV }, NULL, reset_unknown, c13_TID_PRIV },
/* CNTKCTL: swapped by interrupt.S. */
{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
NULL, reset_val, c14_CNTKCTL, 0x00000000 },
}; };
/* Target specific emulation tables */ /* Target specific emulation tables */
......
...@@ -300,6 +300,14 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -300,6 +300,14 @@ vcpu .req r0 @ vcpu pointer always in r0
str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
.endif .endif
mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
.if \store_to_vcpu == 0
push {r2}
.else
str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
.endif
.endm .endm
/* /*
...@@ -310,6 +318,14 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -310,6 +318,14 @@ vcpu .req r0 @ vcpu pointer always in r0
* Assumes vcpu pointer in vcpu reg * Assumes vcpu pointer in vcpu reg
*/ */
.macro write_cp15_state read_from_vcpu .macro write_cp15_state read_from_vcpu
.if \read_from_vcpu == 0
pop {r2}
.else
ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
.endif
mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
.if \read_from_vcpu == 0 .if \read_from_vcpu == 0
pop {r2-r12} pop {r2-r12}
.else .else
...@@ -461,8 +477,28 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -461,8 +477,28 @@ vcpu .req r0 @ vcpu pointer always in r0
* for the host. * for the host.
* *
* Assumes vcpu pointer in vcpu reg * Assumes vcpu pointer in vcpu reg
* Clobbers r2-r5
*/ */
.macro save_timer_state .macro save_timer_state
#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
beq 1f
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
bic r2, #1 @ Clear ENABLE
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
isb
mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
strd r2, r3, [r5]
1:
#endif
@ Allow physical timer/counter access for the host @ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
...@@ -474,6 +510,7 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -474,6 +510,7 @@ vcpu .req r0 @ vcpu pointer always in r0
* for the host. * for the host.
* *
* Assumes vcpu pointer in vcpu reg * Assumes vcpu pointer in vcpu reg
* Clobbers r2-r5
*/ */
.macro restore_timer_state .macro restore_timer_state
@ Restore guest timer context on world switch into the guest:
@ re-trap physical timer access, then reload the virtual timer
@ (CNTVOFF, CNTV_CVAL, CNTV_CTL) if the in-kernel timer is enabled.
@ Disallow physical timer access for the guest @ Disallow physical timer access for the guest
...@@ -482,6 +519,28 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -482,6 +519,28 @@ vcpu .req r0 @ vcpu pointer always in r0
orr r2, r2, #CNTHCTL_PL1PCTEN orr r2, r2, #CNTHCTL_PL1PCTEN
bic r2, r2, #CNTHCTL_PL1PCEN bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
#ifdef CONFIG_KVM_ARM_TIMER
@ Skip the virtual-timer restore unless kvm->arch.timer.enabled is set.
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
beq 1f
@ Load the 64-bit per-VM virtual counter offset as two 32-bit words
@ and program CNTVOFF before enabling the timer.
ldr r2, [r4, #KVM_TIMER_CNTVOFF]
ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
mcrr p15, 4, r2, r3, c14 @ CNTVOFF
@ CNTV_CVAL is a 64-bit doubleword in the vcpu; the offset is
@ materialized via =VCPU_TIMER_CNTV_CVAL (may not fit an immediate).
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
ldrd r2, r3, [r5]
mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL
@ Ensure CNTVOFF/CNTV_CVAL writes complete before re-enabling
@ the timer, so it cannot fire against stale state.
isb
@ Restore only the low control bits (ENABLE/IMASK, per the
@ ARMv7 generic timer CNTV_CTL layout); ISTATUS is read-only.
ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
#endif
.endm .endm
.equ vmentry, 0 .equ vmentry, 0
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册