From db26f8f2da92471e9f7f71ec78d6fa455cd9c821 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <maz@kernel.org>
Date: Sun, 17 Oct 2021 13:42:21 +0100
Subject: [PATCH] clocksource/drivers/arch_arm_timer: Move workaround
 synchronisation around

We currently handle synchronisation when workarounds are enabled by
having an ISB in the __arch_counter_get_cnt?ct_stable() helpers.

While this works, this prevents us from relaxing this synchronisation.

Instead, move it closer to the point where the synchronisation is
actually needed. Further patches will relax this.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211017124225.3018098-14-maz@kernel.org
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
---
 arch/arm64/include/asm/arch_timer.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b8000ef71a2c..519ac1f7f859 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -32,7 +32,7 @@
 ({									\
 	const struct arch_timer_erratum_workaround *__wa;		\
 	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
-	(__wa && __wa->h) ? __wa->h : arch_timer_##h;			\
+	(__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h;	\
 })
 
 #else
@@ -64,11 +64,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
 
 static inline notrace u64 arch_timer_read_cntpct_el0(void)
 {
+	isb();
 	return read_sysreg(cntpct_el0);
 }
 
 static inline notrace u64 arch_timer_read_cntvct_el0(void)
 {
+	isb();
 	return read_sysreg(cntvct_el0);
 }
 
@@ -163,7 +165,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	u64 cnt;
 
-	isb();
 	cnt = arch_timer_reg_read_stable(cntpct_el0);
 	arch_counter_enforce_ordering(cnt);
 	return cnt;
@@ -183,7 +184,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void)
 {
 	u64 cnt;
 
-	isb();
 	cnt = arch_timer_reg_read_stable(cntvct_el0);
 	arch_counter_enforce_ordering(cnt);
 	return cnt;
-- 
GitLab
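
For readers who want to see the control flow in isolation, here is a minimal
standalone C model of the dispatch logic after this patch, runnable in
userspace. It is a sketch, not kernel code: model_isb(), model_read_cntvct(),
struct model_workaround and the this_cpu_wa pointer are invented stand-ins
for isb(), read_sysreg(cntvct_el0), struct arch_timer_erratum_workaround and
the per-CPU timer_unstable_counter_workaround pointer. It only illustrates
where the barrier sits on each read path, not the hardware semantics.

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Stand-in for the ISB: a compiler barrier plus a trace print so the
 * placement of the barrier is visible in the program's output. */
static void model_isb(void)
{
	printf("  isb\n");
	__asm__ volatile("" ::: "memory");
}

static uint64_t fake_counter = 42;	/* stands in for CNTVCT_EL0 */

/* Plain accessor: after this patch, it carries its own barrier. */
static uint64_t model_read_cntvct(void)
{
	model_isb();			/* the "+ isb();" hunk        */
	return fake_counter;		/* read_sysreg(cntvct_el0)    */
}

/* Erratum read hook, registered when this CPU's counter is unstable. */
struct model_workaround {
	uint64_t (*read_cntvct_el0)(void);
};

static uint64_t wa_read_cntvct(void)
{
	return fake_counter + 1;	/* pretend the erratum fixup ran */
}

static struct model_workaround broken_cpu_wa = { wa_read_cntvct };
static struct model_workaround *this_cpu_wa;	/* per-CPU pointer model */

/* Mirrors the patched erratum_handler(): issue the barrier just before
 * calling the workaround hook; otherwise take the plain accessor, which
 * now provides its own barrier. Either way, the caller no longer needs
 * a leading isb(). */
static uint64_t model_get_cntvct_stable(void)
{
	uint64_t (*read)(void);

	if (this_cpu_wa && this_cpu_wa->read_cntvct_el0) {
		model_isb();		/* the ({ isb(); __wa->h; }) branch */
		read = this_cpu_wa->read_cntvct_el0;
	} else {
		read = model_read_cntvct;
	}
	return read();
}

int main(void)
{
	uint64_t v;

	printf("no workaround:\n");
	v = model_get_cntvct_stable();
	printf("  -> %" PRIu64 "\n", v);

	this_cpu_wa = &broken_cpu_wa;
	printf("workaround registered:\n");
	v = model_get_cntvct_stable();
	printf("  -> %" PRIu64 "\n", v);
	return 0;
}
```

The point of the patch is visible in the model: each read path now owns its
barrier, instead of the caller issuing one unconditionally before dispatch.
That is what lets the later patches in the series relax the synchronisation
on one path without touching the other.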