Commit 022ecb96 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Demand fault TM facility registers

Use HFSCR facility disabling to implement demand faulting for TM, with
a hysteresis counter similar to the load_fp etc. counters used in context
switching, which implement the equivalent demand faulting for userspace
facilities.

This speeds up guest entry/exit by avoiding the register save/restore
when a guest is not frequently using them. When a guest does use them
often, there will be some additional demand fault overhead, but these
are not commonly used facilities.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-38-npiggin@gmail.com
Parent a3e18ca8
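The mechanism described in the commit message is small enough to sketch on its own before reading the diff. The snippet below is an illustrative model only, not the kernel code: struct demo_vcpu, FAC, demo_fac_unavailable() and demo_save_state() are names invented for this example, standing in for vcpu->arch.hfscr / hfscr_permitted, HFSCR_TM and the new vcpu->arch.load_tm counter that the real hunks below operate on.

```c
/*
 * Illustrative sketch of HFSCR-style demand faulting with a hysteresis
 * counter. Every name here is hypothetical; see the actual diff below.
 */
#include <stdbool.h>
#include <stdint.h>

#define FAC (1ULL << 58)                /* stand-in for a facility bit such as HFSCR_TM */

struct demo_vcpu {
        uint64_t hfscr;                 /* facilities currently enabled for the guest */
        uint64_t hfscr_permitted;       /* facilities the guest may ever use */
        uint8_t fac_count;              /* hysteresis counter, wraps every 256 exits */
};

/* Facility-unavailable interrupt: grant the facility on first use. */
static bool demo_fac_unavailable(struct demo_vcpu *v)
{
        if (!(v->hfscr_permitted & FAC))
                return false;           /* never permitted: treat as an illegal use */
        v->hfscr |= FAC;                /* enable it; its state is switched from now on */
        return true;                    /* re-enter the guest, which retries the access */
}

/*
 * Guest exit: only touch the facility's registers while it is enabled, and
 * drop it again after 256 exits so an idle guest stops paying the cost.
 * A guest that is still using the facility simply demand-faults once more.
 */
static void demo_save_state(struct demo_vcpu *v)
{
        if (!(v->hfscr & FAC))
                return;                 /* disabled: no registers to save/restore */
        /* ... save the facility's registers here ... */
        if (++v->fac_count == 0)        /* uint8_t wraps to zero every 256 exits */
                v->hfscr &= ~FAC;       /* next guest use will demand-fault again */
}

int main(void)
{
        struct demo_vcpu v = { .hfscr_permitted = FAC };

        demo_fac_unavailable(&v);       /* first use: facility granted */
        for (int i = 0; i < 256; i++)
                demo_save_state(&v);    /* 256 exits later it is dropped again */
        return (v.hfscr & FAC) ? 1 : 0; /* expect 0: facility was turned back off */
}
```

The wrap-around counter is what gives the hysteresis: once granted, the facility stays enabled for up to 256 exits before being dropped, so a guest that keeps using TM demand-faults at most once every few hundred exits, while a guest that never touches it pays no save/restore cost at all.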
@@ -580,6 +580,9 @@ struct kvm_vcpu_arch {
         ulong ppr;
         u32 pspb;
         u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+        u8 load_tm;
+#endif
         ulong fscr;
         ulong shadow_fscr;
         ulong ebbhr;
...
@@ -1446,6 +1446,16 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
         return RESUME_GUEST;
 }
 
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+        if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+                return EMULATE_FAIL;
+
+        vcpu->arch.hfscr |= HFSCR_TM;
+
+        return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                  struct task_struct *tsk)
 {
@@ -1739,6 +1749,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                 r = kvmppc_pmu_unavailable(vcpu);
                         if (cause == FSCR_EBB_LG)
                                 r = kvmppc_ebb_unavailable(vcpu);
+                        if (cause == FSCR_TM_LG)
+                                r = kvmppc_tm_unavailable(vcpu);
                 }
                 if (r == EMULATE_FAIL) {
                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2783,9 +2795,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
         vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
         /*
-         * PM, EBB is demand-faulted so start with it clear.
+         * PM, EBB, TM are demand-faulted so start with it clear.
          */
-        vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
+        vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
 
         kvmppc_mmu_book3s_hv_init(vcpu);
@@ -3868,8 +3880,9 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
                 msr |= MSR_VEC;
         if (cpu_has_feature(CPU_FTR_VSX))
                 msr |= MSR_VSX;
-        if (cpu_has_feature(CPU_FTR_TM) ||
-            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+        if ((cpu_has_feature(CPU_FTR_TM) ||
+             cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+            (vcpu->arch.hfscr & HFSCR_TM))
                 msr |= MSR_TM;
         msr = msr_check_and_set(msr);
@@ -4608,8 +4621,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                 msr |= MSR_VEC;
         if (cpu_has_feature(CPU_FTR_VSX))
                 msr |= MSR_VSX;
-        if (cpu_has_feature(CPU_FTR_TM) ||
-            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+        if ((cpu_has_feature(CPU_FTR_TM) ||
+             cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+            (vcpu->arch.hfscr & HFSCR_TM))
                 msr |= MSR_TM;
         msr = msr_check_and_set(msr);
...
@@ -310,7 +310,7 @@ bool load_vcpu_state(struct kvm_vcpu *vcpu,
                 if (MSR_TM_ACTIVE(guest_msr)) {
                         kvmppc_restore_tm_hv(vcpu, guest_msr, true);
                         ret = true;
-                } else {
+                } else if (vcpu->arch.hfscr & HFSCR_TM) {
                         mtspr(SPRN_TEXASR, vcpu->arch.texasr);
                         mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
                         mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
@@ -346,10 +346,16 @@ void store_vcpu_state(struct kvm_vcpu *vcpu)
                 unsigned long guest_msr = vcpu->arch.shregs.msr;
                 if (MSR_TM_ACTIVE(guest_msr)) {
                         kvmppc_save_tm_hv(vcpu, guest_msr, true);
-                } else {
+                } else if (vcpu->arch.hfscr & HFSCR_TM) {
                         vcpu->arch.texasr = mfspr(SPRN_TEXASR);
                         vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
                         vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+                        if (!vcpu->arch.nested) {
+                                vcpu->arch.load_tm++; /* see load_ebb comment */
+                                if (!vcpu->arch.load_tm)
+                                        vcpu->arch.hfscr &= ~HFSCR_TM;
+                        }
                 }
         }
 #endif
@@ -641,8 +647,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                 msr |= MSR_VEC;
         if (cpu_has_feature(CPU_FTR_VSX))
                 msr |= MSR_VSX;
-        if (cpu_has_feature(CPU_FTR_TM) ||
-            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+        if ((cpu_has_feature(CPU_FTR_TM) ||
+             cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+            (vcpu->arch.hfscr & HFSCR_TM))
                 msr |= MSR_TM;
         msr = msr_check_and_set(msr);
         /* Save MSR for restore. This is after hard disable, so EE is clear. */
...