From 93b125cdea877f350bed49bfb9fd74272a3ae750 Mon Sep 17 00:00:00 2001
From: Yang Yingliang
Date: Wed, 22 May 2019 12:22:00 +0800
Subject: [PATCH] arm64: vdso: do cntvct workaround in the VDSO

hulk inclusion
category: performance
bugzilla: 16082
CVE: NA

-------------------------------------------------

When a CNTVCT erratum workaround is enabled, the VDSO fast path is
disabled and every gettimeofday() call falls back to the syscall.
Instead, do the workaround in the VDSO itself: read CNTVCT_EL0 twice
and retry (up to 50 times) while the two reads differ by 32 cycles or
more, so that a glitched counter value is never used.

Test code:

#include <stdio.h>
#include <sys/time.h>

static unsigned long long getcycle(void)
{
	unsigned long long cval;

	asm volatile("isb" : : : "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));

	return cval;
}

int main(void)
{
	int i;
	struct timeval tv;
	struct timezone tz;
	unsigned long long s = getcycle();

	for (i = 0; i < 100000000; i++) {
		gettimeofday(&tv, &tz);
	}

	printf("cost:%llu\n", getcycle() - s);
	return 0;
}

Before this patchset, 100 million gettimeofday() calls cost 75.78s:

[root@localhost yang]# ./gettime
cost:3789000522 (20ns per cycle)

After this patchset, they cost 3.58s:

[root@localhost yang]# ./gettime
cost:183208254 (20ns per cycle)

Signed-off-by: Yang Yingliang
---
 arch/arm64/kernel/vdso/gettimeofday.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/vdso/gettimeofday.c b/arch/arm64/kernel/vdso/gettimeofday.c
index 12937865bd95..79eaabb4c6fa 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.c
+++ b/arch/arm64/kernel/vdso/gettimeofday.c
@@ -112,13 +112,24 @@ static notrace u32 vdso_read_retry(const struct vdso_data *vd, u32 start)
  * Returns the clock delta, in nanoseconds left-shifted by the clock
  * shift.
  */
-static notrace u64 get_clock_shifted_nsec(u64 cycle_last, u64 mult)
+static notrace u64 get_clock_shifted_nsec(u64 cycle_last, u64 mult, bool vdso_fix)
 {
 	u64 res;
 
 	/* Read the virtual counter. */
 	isb();
 	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
+	if (vdso_fix) {
+		u64 new;
+		int retries = 50;
+
+		asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");
+		while (unlikely((new - res) >> 5) && retries) {
+			asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
+			asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");
+			retries--;
+		}
+	}
 
 	res = res - cycle_last;
 	/* We can only guarantee 56 bits of precision. */
@@ -150,7 +161,7 @@ static __always_inline notrace int do_realtime(const struct vdso_data *vd,
 
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult);
+	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult, vd->vdso_fix);
 	ns >>= cs_shift;
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
@@ -183,7 +194,7 @@ static notrace int do_monotonic(const struct vdso_data *vd,
 
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult);
+	ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult, vd->vdso_fix);
 	ns >>= cs_shift;
 
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -214,7 +225,7 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
 
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	ns += get_clock_shifted_nsec(cycle_last, cs_raw_mult);
+	ns += get_clock_shifted_nsec(cycle_last, cs_raw_mult, vd->vdso_fix);
 	ns >>= cs_shift;
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
--
GitLab
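
Note: the hunks above consume vd->vdso_fix, but this diff does not add a
vdso_fix field to the arm64 struct vdso_data; a companion change elsewhere
in the series presumably adds the field and sets it when a CNTVCT erratum
workaround is active.

The retry scheme itself can be exercised from userspace. The sketch below
is not part of the patch: read_cntvct_fixed() is a hypothetical name, and
the 50-retry bound and the 32-cycle (">> 5") tolerance are taken from the
diff above.

/* Build on aarch64: gcc -O2 -o cntvct_retry cntvct_retry.c */
#include <stdio.h>

/*
 * Mirror of the patch's workaround: read CNTVCT_EL0 twice and retry
 * while the two reads differ by 32 cycles or more, up to 50 times.
 */
static unsigned long long read_cntvct_fixed(void)
{
	unsigned long long old, new;
	int retries = 50;

	/* The ISB keeps the counter read from being speculated early. */
	asm volatile("isb" : : : "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (old) :: "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");

	while (((new - old) >> 5) && retries) {
		asm volatile("mrs %0, cntvct_el0" : "=r" (old) :: "memory");
		asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory");
		retries--;
	}

	/* Like the patch, use the first read of the agreeing pair. */
	return old;
}

int main(void)
{
	printf("cntvct: %llu\n", read_cntvct_fixed());
	return 0;
}

As in the patch, two reads no more than 31 cycles apart are treated as
consistent, so either value is safe to use; returning the first read keeps
the behaviour aligned with get_clock_shifted_nsec(), which continues with
res after the retry loop.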