Commit 7b4b6658 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Fix software counters for fast moving event sources

Reimplement the software counters to deal with fast moving
event sources (such as tracepoints). This means being able
to generate multiple overflows from a single 'event' as well
as support throttling.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 46ab9764
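The new scheme keeps hwc->period_left in the range [-sample_period, 0] and uses its sign as the overflow trigger: once a burst of events pushes it non-negative, a single division yields the number of whole periods that elapsed. Below is a minimal user-space C sketch of that arithmetic, with a hypothetical stand-alone helper and without the kernel's atomic64/cmpxchg retry loop:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the overflow accounting: period_left stays negative while
 * events accumulate; once it reaches >= 0, (period + left) / period
 * whole sample periods have elapsed, and period_left is rewound by
 * that many periods back into the [-period, 0] range.
 */
static uint64_t count_overflows(int64_t *period_left, uint64_t period)
{
        int64_t val = *period_left;
        uint64_t nr, offset;

        if (val < 0)                            /* still inside the current period */
                return 0;

        nr = (period + (uint64_t)val) / period; /* whole periods elapsed */
        offset = nr * period;
        *period_left = val - (int64_t)offset;   /* rewind into [-period, 0] */

        return nr;
}

int main(void)
{
        int64_t left = -100;    /* sample_period = 100 */
        uint64_t nr;

        left += 250;            /* a burst of 250 events arrives at once */
        nr = count_overflows(&left, 100);

        /* prints: overflows: 2, period_left: -50 */
        printf("overflows: %llu, period_left: %lld\n",
               (unsigned long long)nr, (long long)left);
        return 0;
}

In the kernel code below, perf_swcounter_add() performs the equivalent of the "left += 250" step with atomic64_add_negative() and only enters the slower overflow path when the sign of period_left flips.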
@@ -3344,87 +3344,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
         struct hw_perf_counter *hwc = &counter->hw;
-        u64 prev, now;
-        s64 delta;
+        u64 period = hwc->last_period;
+        u64 nr, offset;
+        s64 old, val;
+
+        hwc->last_period = hwc->sample_period;
 again:
-        prev = atomic64_read(&hwc->prev_count);
-        now = atomic64_read(&hwc->count);
-        if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-                goto again;
-
-        delta = now - prev;
-
-        atomic64_add(delta, &counter->count);
-        atomic64_sub(delta, &hwc->period_left);
+        old = val = atomic64_read(&hwc->period_left);
+        if (val < 0)
+                return 0;
+
+        nr = div64_u64(period + val, period);
+        offset = nr * period;
+        val -= offset;
+        if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+                goto again;
+
+        return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+                                    int nmi, struct perf_sample_data *data)
 {
         struct hw_perf_counter *hwc = &counter->hw;
-        s64 left = atomic64_read(&hwc->period_left);
-        s64 period = hwc->sample_period;
-
-        if (unlikely(left <= -period)) {
-                left = period;
-                atomic64_set(&hwc->period_left, left);
-                hwc->last_period = period;
-        }
-
-        if (unlikely(left <= 0)) {
-                left += period;
-                atomic64_add(period, &hwc->period_left);
-                hwc->last_period = period;
-        }
-
-        atomic64_set(&hwc->prev_count, -left);
-        atomic64_set(&hwc->count, -left);
+        u64 overflow;
+
+        data->period = counter->hw.last_period;
+        overflow = perf_swcounter_set_period(counter);
+
+        if (hwc->interrupts == MAX_INTERRUPTS)
+                return;
+
+        for (; overflow; overflow--) {
+                if (perf_counter_overflow(counter, nmi, data)) {
+                        /*
+                         * We inhibit the overflow from happening when
+                         * hwc->interrupts == MAX_INTERRUPTS.
+                         */
+                        break;
+                }
+        }
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-        enum hrtimer_restart ret = HRTIMER_RESTART;
-        struct perf_sample_data data;
-        struct perf_counter *counter;
-        u64 period;
-
-        counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-        counter->pmu->read(counter);
-
-        data.addr = 0;
-        data.regs = get_irq_regs();
         /*
-         * In case we exclude kernel IPs or are somehow not in interrupt
-         * context, provide the next best thing, the user IP.
+         * Nothing to do, we already reset hwc->interrupts.
          */
-        if ((counter->attr.exclude_kernel || !data.regs) &&
-                        !counter->attr.exclude_user)
-                data.regs = task_pt_regs(current);
-
-        if (data.regs) {
-                if (perf_counter_overflow(counter, 0, &data))
-                        ret = HRTIMER_NORESTART;
-        }
-
-        period = max_t(u64, 10000, counter->hw.sample_period);
-        hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-
-        return ret;
 }
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-                                    int nmi, struct perf_sample_data *data)
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+                               int nmi, struct perf_sample_data *data)
 {
-        data->period = counter->hw.last_period;
-
-        perf_swcounter_update(counter);
-        perf_swcounter_set_period(counter);
-        if (perf_counter_overflow(counter, nmi, data))
-                /* soft-disable the counter */
-                ;
+        struct hw_perf_counter *hwc = &counter->hw;
+
+        atomic64_add(nr, &counter->count);
+
+        if (!hwc->sample_period)
+                return;
+
+        if (!data->regs)
+                return;
+
+        if (!atomic64_add_negative(nr, &hwc->period_left))
+                perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3488,15 +3482,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
         return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-                               int nmi, struct perf_sample_data *data)
-{
-        int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-        if (counter->hw.sample_period && !neg && data->regs)
-                perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
                                      enum perf_type_id type,
                                      u32 event, u64 nr, int nmi,
@@ -3575,26 +3560,65 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
 
 static void perf_swcounter_read(struct perf_counter *counter)
 {
-        perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-        perf_swcounter_set_period(counter);
+        struct hw_perf_counter *hwc = &counter->hw;
+
+        if (hwc->sample_period) {
+                hwc->last_period = hwc->sample_period;
+                perf_swcounter_set_period(counter);
+        }
         return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-        perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
         .enable         = perf_swcounter_enable,
         .disable        = perf_swcounter_disable,
         .read           = perf_swcounter_read,
+        .unthrottle     = perf_swcounter_unthrottle,
 };
 
+/*
+ * hrtimer based swcounter callback
+ */
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+        enum hrtimer_restart ret = HRTIMER_RESTART;
+        struct perf_sample_data data;
+        struct perf_counter *counter;
+        u64 period;
+
+        counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+        counter->pmu->read(counter);
+
+        data.addr = 0;
+        data.regs = get_irq_regs();
+        /*
+         * In case we exclude kernel IPs or are somehow not in interrupt
+         * context, provide the next best thing, the user IP.
+         */
+        if ((counter->attr.exclude_kernel || !data.regs) &&
+                        !counter->attr.exclude_user)
+                data.regs = task_pt_regs(current);
+
+        if (data.regs) {
+                if (perf_counter_overflow(counter, 0, &data))
+                        ret = HRTIMER_NORESTART;
+        }
+
+        period = max_t(u64, 10000, counter->hw.sample_period);
+        hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+        return ret;
+}
+
 /*
  * Software counter: cpu wall time clock
  */
...