提交 db26f8f2 编写于 作者: M Marc Zyngier 提交者: Daniel Lezcano

clocksource/drivers/arch_arm_timer: Move workaround synchronisation around

We currently handle synchronisation when workarounds are enabled
by having an ISB in the __arch_counter_get_cnt?ct_stable() helpers.

While this works, this prevents us from relaxing this synchronisation.

Instead, move it closer to the point where the synchronisation is
actually needed. Further patches will subsequently relax this.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211017124225.3018098-14-maz@kernel.org
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
上级 c1153d52
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
({ \ ({ \
const struct arch_timer_erratum_workaround *__wa; \ const struct arch_timer_erratum_workaround *__wa; \
__wa = __this_cpu_read(timer_unstable_counter_workaround); \ __wa = __this_cpu_read(timer_unstable_counter_workaround); \
(__wa && __wa->h) ? __wa->h : arch_timer_##h; \ (__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
}) })
#else #else
...@@ -64,11 +64,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, ...@@ -64,11 +64,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
static inline notrace u64 arch_timer_read_cntpct_el0(void) static inline notrace u64 arch_timer_read_cntpct_el0(void)
{ {
isb();
return read_sysreg(cntpct_el0); return read_sysreg(cntpct_el0);
} }
static inline notrace u64 arch_timer_read_cntvct_el0(void) static inline notrace u64 arch_timer_read_cntvct_el0(void)
{ {
isb();
return read_sysreg(cntvct_el0); return read_sysreg(cntvct_el0);
} }
...@@ -163,7 +165,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void) ...@@ -163,7 +165,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{ {
u64 cnt; u64 cnt;
isb();
cnt = arch_timer_reg_read_stable(cntpct_el0); cnt = arch_timer_reg_read_stable(cntpct_el0);
arch_counter_enforce_ordering(cnt); arch_counter_enforce_ordering(cnt);
return cnt; return cnt;
...@@ -183,7 +184,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void) ...@@ -183,7 +184,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{ {
u64 cnt; u64 cnt;
isb();
cnt = arch_timer_reg_read_stable(cntvct_el0); cnt = arch_timer_reg_read_stable(cntvct_el0);
arch_counter_enforce_ordering(cnt); arch_counter_enforce_ordering(cnt);
return cnt; return cnt;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册