Commit 59eaef78 authored by Peter Zijlstra, committed by Ingo Molnar

x86/tsc: Remodel cyc2ns to use seqcount_latch()

Replace the custom multi-value scheme with the more regular
seqcount_latch() scheme. Along with scrapping a lot of lines, the latch
scheme is better documented and used in more places.

The immediate benefit, however, is that the update side is no longer limited:
the current code has a point where writers must block, and that limit is hit
by upcoming changes.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 8309f86c
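
For orientation before the diff: the seqcount_latch() read side picks one of two
data slots by the low bit of a sequence counter and retries if the counter moved,
while the write side bumps the counter around each slot update. Below is a minimal
user-space sketch of that pattern, not the kernel implementation: it uses C11
atomics, the names (struct sample, latch_write(), latch_read()) are invented for
the illustration, and the memory ordering is deliberately simplified compared to
the kernel's seqcount primitives.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint32_t mul;
	uint32_t shift;
	uint64_t offset;
};

static _Atomic unsigned int seq;
static struct sample slots[2];

/* Writer: bump the sequence before each copy, so a concurrent reader is
 * steered to the slot that is not currently being rewritten.  The kernel's
 * raw_write_seqcount_latch() pairs each increment with a write barrier; this
 * sketch leans on seq_cst atomics and only shows the slot-selection logic. */
static void latch_write(const struct sample *s)
{
	atomic_fetch_add(&seq, 1);
	slots[0] = *s;
	atomic_fetch_add(&seq, 1);
	slots[1] = *s;
}

/* Reader: use the low bit of the sequence to pick a slot, and retry if the
 * sequence changed while we were copying it out. */
static void latch_read(struct sample *out)
{
	unsigned int s;

	do {
		s = atomic_load(&seq);
		*out = slots[s & 1];
	} while (s != atomic_load(&seq));
}

int main(void)
{
	/* Single-threaded demo of the slot-selection and retry logic. */
	struct sample in = { .mul = 3, .shift = 1, .offset = 100 }, out;

	latch_write(&in);
	latch_read(&out);
	printf("mul=%u shift=%u offset=%llu\n",
	       (unsigned)out.mul, (unsigned)out.shift,
	       (unsigned long long)out.offset);
	return 0;
}
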
@@ -2255,7 +2255,7 @@ static struct pmu pmu = {
 void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
-	struct cyc2ns_data *data;
+	struct cyc2ns_data data;
 	u64 offset;
 
 	userpg->cap_user_time = 0;
@@ -2267,17 +2267,17 @@ void arch_perf_update_userpage(struct perf_event *event,
 	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
-	data = cyc2ns_read_begin();
+	cyc2ns_read_begin(&data);
 
-	offset = data->cyc2ns_offset + __sched_clock_offset;
+	offset = data.cyc2ns_offset + __sched_clock_offset;
 
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
 	 */
 	userpg->cap_user_time = 1;
-	userpg->time_mult = data->cyc2ns_mul;
-	userpg->time_shift = data->cyc2ns_shift;
+	userpg->time_mult = data.cyc2ns_mul;
+	userpg->time_shift = data.cyc2ns_shift;
 	userpg->time_offset = offset - now;
 
 	/*
@@ -2289,7 +2289,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_zero = offset;
 	}
 
-	cyc2ns_read_end(data);
+	cyc2ns_read_end();
 }
 
 void
...
@@ -29,11 +29,9 @@ struct cyc2ns_data {
 	u32 cyc2ns_mul;
 	u32 cyc2ns_shift;
 	u64 cyc2ns_offset;
-	u32 __count;
-	/* u32 hole */
-}; /* 24 bytes -- do not grow */
+}; /* 16 bytes */
 
-extern struct cyc2ns_data *cyc2ns_read_begin(void);
-extern void cyc2ns_read_end(struct cyc2ns_data *);
+extern void cyc2ns_read_begin(struct cyc2ns_data *);
+extern void cyc2ns_read_end(void);
 
 #endif /* _ASM_X86_TIMER_H */
...
@@ -51,115 +51,34 @@ static u32 art_to_tsc_denominator;
 static u64 art_to_tsc_offset;
 struct clocksource *art_related_clocksource;
 
-/*
- * Use a ring-buffer like data structure, where a writer advances the head by
- * writing a new data entry and a reader advances the tail when it observes a
- * new entry.
- *
- * Writers are made to wait on readers until there's space to write a new
- * entry.
- *
- * This means that we can always use an {offset, mul} pair to compute a ns
- * value that is 'roughly' in the right direction, even if we're writing a new
- * {offset, mul} pair during the clock read.
- *
- * The down-side is that we can no longer guarantee strict monotonicity anymore
- * (assuming the TSC was that to begin with), because while we compute the
- * intersection point of the two clock slopes and make sure the time is
- * continuous at the point of switching; we can no longer guarantee a reader is
- * strictly before or after the switch point.
- *
- * It does mean a reader no longer needs to disable IRQs in order to avoid
- * CPU-Freq updates messing with his times, and similarly an NMI reader will
- * no longer run the risk of hitting half-written state.
- */
-
 struct cyc2ns {
-	struct cyc2ns_data data[2];	/* 0 + 2*24 = 48 */
-	struct cyc2ns_data *head;	/* 48 + 8 = 56 */
-	struct cyc2ns_data *tail;	/* 56 + 8 = 64 */
-}; /* exactly fits one cacheline */
+	struct cyc2ns_data data[2];	/* 0 + 2*16 = 32 */
+	seqcount_t	   seq;		/* 32 + 4 = 36 */
+}; /* fits one cacheline */
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-struct cyc2ns_data *cyc2ns_read_begin(void)
+void cyc2ns_read_begin(struct cyc2ns_data *data)
 {
-	struct cyc2ns_data *head;
+	int seq, idx;
 
-	preempt_disable();
+	preempt_disable_notrace();
 
-	head = this_cpu_read(cyc2ns.head);
-	/*
-	 * Ensure we observe the entry when we observe the pointer to it.
-	 * matches the wmb from cyc2ns_write_end().
-	 */
-	smp_read_barrier_depends();
-	head->__count++;
-	barrier();
-
-	return head;
-}
+	do {
+		seq = this_cpu_read(cyc2ns.seq.sequence);
+		idx = seq & 1;
 
-void cyc2ns_read_end(struct cyc2ns_data *head)
-{
-	barrier();
-	/*
-	 * If we're the outer most nested read; update the tail pointer
-	 * when we're done. This notifies possible pending writers
-	 * that we've observed the head pointer and that the other
-	 * entry is now free.
-	 */
-	if (!--head->__count) {
-		/*
-		 * x86-TSO does not reorder writes with older reads;
-		 * therefore once this write becomes visible to another
-		 * cpu, we must be finished reading the cyc2ns_data.
-		 *
-		 * matches with cyc2ns_write_begin().
-		 */
-		this_cpu_write(cyc2ns.tail, head);
-	}
-	preempt_enable();
-}
+		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
+		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
+		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
 
-/*
- * Begin writing a new @data entry for @cpu.
- *
- * Assumes some sort of write side lock; currently 'provided' by the assumption
- * that cpufreq will call its notifiers sequentially.
- */
-static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
-{
-	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-	struct cyc2ns_data *data = c2n->data;
+	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
+}
 
-	if (data == c2n->head)
-		data++;
+void cyc2ns_read_end(void)
+{
+	preempt_enable_notrace();
+}
 
-	/* XXX send an IPI to @cpu in order to guarantee a read? */
-
-	/*
-	 * When we observe the tail write from cyc2ns_read_end(),
-	 * the cpu must be done with that entry and its safe
-	 * to start writing to it.
-	 */
-	while (c2n->tail == data)
-		cpu_relax();
-
-	return data;
-}
-
-static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
-{
-	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
-	/*
-	 * Ensure the @data writes are visible before we publish the
-	 * entry. Matches the data-depencency in cyc2ns_read_begin().
-	 */
-	smp_wmb();
-
-	ACCESS_ONCE(c2n->head) = data;
-}
 
 /*
@@ -191,7 +110,6 @@ static void cyc2ns_data_init(struct cyc2ns_data *data)
 	data->cyc2ns_mul = 0;
 	data->cyc2ns_shift = 0;
 	data->cyc2ns_offset = 0;
-	data->__count = 0;
 }
 
 static void cyc2ns_init(int cpu)
@@ -201,43 +119,20 @@ static void cyc2ns_init(int cpu)
 	cyc2ns_data_init(&c2n->data[0]);
 	cyc2ns_data_init(&c2n->data[1]);
 
-	c2n->head = c2n->data;
-	c2n->tail = c2n->data;
+	seqcount_init(&c2n->seq);
 }
 
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-	struct cyc2ns_data *data, *tail;
+	struct cyc2ns_data data;
 	unsigned long long ns;
 
-	/*
-	 * See cyc2ns_read_*() for details; replicated in order to avoid
-	 * an extra few instructions that came with the abstraction.
-	 * Notable, it allows us to only do the __count and tail update
-	 * dance when its actually needed.
-	 */
-	preempt_disable_notrace();
-	data = this_cpu_read(cyc2ns.head);
-	tail = this_cpu_read(cyc2ns.tail);
-
-	if (likely(data == tail)) {
-		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-	} else {
-		data->__count++;
-
-		barrier();
-
-		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-
-		barrier();
-
-		if (!--data->__count)
-			this_cpu_write(cyc2ns.tail, data);
-	}
-	preempt_enable_notrace();
+	cyc2ns_read_begin(&data);
+
+	ns = data.cyc2ns_offset;
+	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+
+	cyc2ns_read_end();
 
 	return ns;
 }
@@ -245,7 +140,8 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 static void set_cyc2ns_scale(unsigned long khz, int cpu)
 {
 	unsigned long long tsc_now, ns_now;
-	struct cyc2ns_data *data;
+	struct cyc2ns_data data;
+	struct cyc2ns *c2n;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -254,8 +150,6 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	if (!khz)
 		goto done;
 
-	data = cyc2ns_write_begin(cpu);
-
 	tsc_now = rdtsc();
 	ns_now = cycles_2_ns(tsc_now);
@@ -264,7 +158,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	 * time function is continuous; see the comment near struct
 	 * cyc2ns_data.
 	 */
-	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
+	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
 			       NSEC_PER_MSEC, 0);
 
 	/*
@@ -273,15 +167,20 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
 	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
 	 */
-	if (data->cyc2ns_shift == 32) {
-		data->cyc2ns_shift = 31;
-		data->cyc2ns_mul >>= 1;
+	if (data.cyc2ns_shift == 32) {
+		data.cyc2ns_shift = 31;
+		data.cyc2ns_mul >>= 1;
 	}
 
-	data->cyc2ns_offset = ns_now -
-		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
+	data.cyc2ns_offset = ns_now -
+		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
+
+	c2n = per_cpu_ptr(&cyc2ns, cpu);
 
-	cyc2ns_write_end(cpu, data);
+	raw_write_seqcount_latch(&c2n->seq);
+	c2n->data[0] = data;
+	raw_write_seqcount_latch(&c2n->seq);
+	c2n->data[1] = data;
 
 done:
 	sched_clock_idle_wakeup_event(0);
...
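
A side note on the arithmetic used in set_cyc2ns_scale() and cycles_2_ns() above:
the conversion is plain fixed-point, ns = offset + ((cyc * mul) >> shift). The
following stand-alone sketch shows just that computation, with hand-picked
mul/shift values roughly matching a hypothetical 3 GHz TSC (in the kernel they
come from clocks_calc_mult_shift()) and a local reimplementation of
mul_u64_u32_shr() written only for this example.

#include <stdint.h>
#include <stdio.h>

/* Example-only equivalent of the kernel's mul_u64_u32_shr(), relying on the
 * compiler's 128-bit integer extension (gcc/clang on x86-64). */
static inline uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* Illustrative values: mul / 2^shift ~= 1/3 ns per cycle at 3 GHz.
	 * shift is kept at 31, matching the patch's cap below 32. */
	uint32_t mul = 715827882;		/* ~(1/3) * 2^31 */
	uint32_t shift = 31;
	uint64_t offset = 0;
	uint64_t cyc = 3000000000ULL;		/* one second of cycles at 3 GHz */

	/* Prints roughly 1,000,000,000 ns, i.e. one second. */
	uint64_t ns = offset + mul_u64_u32_shr(cyc, mul, shift);
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cyc, (unsigned long long)ns);
	return 0;
}
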
@@ -456,12 +456,13 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
  */
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-	struct cyc2ns_data *data = cyc2ns_read_begin();
+	struct cyc2ns_data data;
 	unsigned long long ns;
 
-	ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+	cyc2ns_read_begin(&data);
+	ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+	cyc2ns_read_end();
 
-	cyc2ns_read_end(data);
 	return ns;
 }
 
@@ -470,12 +471,13 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
  */
 static inline unsigned long long ns_2_cycles(unsigned long long ns)
 {
-	struct cyc2ns_data *data = cyc2ns_read_begin();
+	struct cyc2ns_data data;
 	unsigned long long cyc;
 
-	cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+	cyc2ns_read_begin(&data);
+	cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
+	cyc2ns_read_end();
 
-	cyc2ns_read_end(data);
 	return cyc;
 }
...