Commit c6e5ca35 authored by Andy Lutomirski, committed by Ingo Molnar

x86/asm/tsc: Inline native_read_tsc() and remove __native_read_tsc()

In the following commit:

  cdc7957d ("x86: move native_read_tsc() offline")

... native_read_tsc() was moved out of line, presumably for some
now-obsolete vDSO-related reason. Undo it.

The entire "rdtsc; shl; or" instruction sequence is only 11 bytes, and calls
via rdtscl() and similar helpers were already inlined.
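
For illustration only, a minimal, self-contained sketch of what the inlined read boils down to, assuming x86_64 and GCC-style inline asm; read_tsc_sketch() is a made-up name standing in for the kernel's DECLARE_ARGS/EAX_EDX_VAL machinery, not the actual helper from the patch below.

#include <stdint.h>

/* Hypothetical stand-in for the inlined kernel helper (x86_64, GCC asm assumed). */
static inline uint64_t read_tsc_sketch(void)
{
	uint32_t lo, hi;

	/* RDTSC leaves the low 32 bits of the TSC in EAX and the high 32 bits in EDX. */
	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi));

	/* Combining the halves is the shl + or mentioned above. */
	return ((uint64_t)hi << 32) | lo;
}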
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/d05ffe2aaf8468ca475ebc00efad7b2fa174af19.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent c73e36b7
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)__native_read_tsc();
+	ret = (cycle_t)native_read_tsc();
 
 	last = gtod->cycle_last;
...
@@ -106,12 +106,10 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 	return err;
 }
 
-extern unsigned long long native_read_tsc(void);
-
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+static __always_inline unsigned long long native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
@@ -181,10 +179,10 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 }
 
 #define rdtscl(low)						\
-	((low) = (u32)__native_read_tsc())
+	((low) = (u32)native_read_tsc())
 
 #define rdtscll(val)						\
-	((val) = __native_read_tsc())
+	((val) = native_read_tsc())
 
 #define rdpmc(counter, low, high)			\
 do {							\
...
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	u64 delta = native_read_tsc() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
...
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = __native_read_tsc();
+	tsc = native_read_tsc();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
...
@@ -42,7 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	return (cycles_t)native_read_tsc();
 }
 
 extern void tsc_init(void);
...
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = __native_read_tsc();
+	t1 = native_read_tsc();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = __native_read_tsc();
+	t2 = native_read_tsc();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
...
@@ -308,12 +308,6 @@ unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 #endif
 
-unsigned long long native_read_tsc(void)
-{
-	return __native_read_tsc();
-}
-EXPORT_SYMBOL(native_read_tsc);
-
 int check_tsc_unstable(void)
 {
 	return tsc_unstable;
...