提交 0f1e172b 编写于 作者: Marc Zyngier

KVM: arm64: PMU: Only narrow counters that are not 64bit wide

The current PMU emulation sometimes narrows counters to 32bit
if the counter isn't the cycle counter. As this is going to
change with PMUv3p5 where the counters are all 64bit, fix
the couple of cases where this happens unconditionally.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Link: https://lore.kernel.org/r/20221113163832.3154370-7-maz@kernel.org
上级 001d85bd
...@@ -151,20 +151,17 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) ...@@ -151,20 +151,17 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
*/ */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{ {
u64 counter, reg, val; u64 reg, val;
if (!pmc->perf_event) if (!pmc->perf_event)
return; return;
counter = kvm_pmu_get_counter_value(vcpu, pmc->idx); val = kvm_pmu_get_counter_value(vcpu, pmc->idx);
if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
reg = PMCCNTR_EL0; reg = PMCCNTR_EL0;
val = counter; else
} else {
reg = PMEVCNTR0_EL0 + pmc->idx; reg = PMEVCNTR0_EL0 + pmc->idx;
val = lower_32_bits(counter);
}
__vcpu_sys_reg(vcpu, reg) = val; __vcpu_sys_reg(vcpu, reg) = val;
...@@ -414,7 +411,8 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, ...@@ -414,7 +411,8 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
/* Increment this counter */ /* Increment this counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg); if (!kvm_pmu_idx_is_64bit(vcpu, i))
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
/* No overflow? move on */ /* No overflow? move on */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册