Commit 109a10b4 authored by Yang Yingliang, committed by Xie XiuQi

Revert "arm64: vdso: do cntvct workaround in the VDSO"

hulk inclusion
category: performance
bugzilla: 16082
CVE: NA

-------------------------------------------------

This reverts commit 487f18a5f1cfc1ed7949c1f3cf535a0d963d81c6.
487f18a5f1cf ("arm64: vdso: do cntvct workaround in the VDSO") and
47819486652f ("arm64: arch_timer: Disable CNTVCT_EL0 trap if workaround is enabled")
are not needed for now; they can be applied again later.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 162457d2
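
For context, the reverted change made the VDSO's counter read defensive: it read CNTVCT_EL0 twice and retried, up to 50 times, while the two reads diverged by more than 31 ticks. Below is a minimal user-space sketch of that double-read pattern, an illustration only: it assumes an AArch64 target where EL0 access to the virtual counter is enabled, and read_cntvct/read_cntvct_fixed are hypothetical names, not the kernel's.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read the virtual counter, with a barrier so the read is not speculated early. */
static inline uint64_t read_cntvct(void)
{
	uint64_t val;

	asm volatile("isb; mrs %0, cntvct_el0" : "=r" (val) :: "memory");
	return val;
}

/* Double-read pattern: retry while two back-to-back reads disagree. */
static uint64_t read_cntvct_fixed(void)
{
	uint64_t first = read_cntvct();
	uint64_t second = read_cntvct();
	int retries = 50;

	/* (second - first) >> 5 is nonzero iff the reads differ by more than 31 ticks. */
	while (((second - first) >> 5) && retries) {
		first = read_cntvct();
		second = read_cntvct();
		retries--;
	}
	return second;
}

int main(void)
{
	printf("counter: %" PRIu64 "\n", read_cntvct_fixed());
	return 0;
}

The bounded retry budget keeps the worst case predictable: if consecutive reads never converge, the loop gives up after 50 attempts instead of spinning forever.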
@@ -112,24 +112,13 @@ static notrace u32 vdso_read_retry(const struct vdso_data *vd, u32 start)
  * Returns the clock delta, in nanoseconds left-shifted by the clock
  * shift.
  */
-static notrace u64 get_clock_shifted_nsec(u64 cycle_last, u64 mult, bool vdso_fix)
+static notrace u64 get_clock_shifted_nsec(u64 cycle_last, u64 mult)
 {
 	u64 res;
 
 	/* Read the virtual counter. */
 	isb();
 	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
 
-	if (vdso_fix) {
-		u64 new;
-		int retries = 50;
-
-		asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");
-		while (unlikely((new - res) >> 5) && retries) {
-			asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
-			asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");
-			retries--;
-		}
-	}
-
 	res = res - cycle_last;
 
 	/* We can only guarantee 56 bits of precision. */
@@ -161,7 +150,7 @@ static __always_inline notrace int do_realtime(const struct vdso_data *vd,
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult, vd->vdso_fix);
+	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult);
 	ns >>= cs_shift;
 
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
@@ -194,7 +183,7 @@ static notrace int do_monotonic(const struct vdso_data *vd,
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult, vd->vdso_fix);
+	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult);
 	ns >>= cs_shift;
 
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -225,7 +214,7 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_raw_mult, vd->vdso_fix);
+	ns += get_clock_shifted_nsec(cycle_last, cs_raw_mult);
 	ns >>= cs_shift;
 
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;