Commit e9d2b064 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Undo the per cpu-context timer stuff

Revert the per cpu-context timers because of an unfortunate
nohz interaction. Fixing that would have been somewhat ugly, so
go back to driving things from the regular tick. Provide a
jiffies-based interval feature for people who want slower rotations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <20100917093009.519845633@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 917bdd1c
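The mechanism after this patch: instead of one hrtimer per cpu-context, every scheduler tick calls perf_event_task_tick(), which walks a per-cpu rotation list and rotates a context only when jiffies is a multiple of its jiffies_interval. A minimal user-space sketch of that gating follows; struct cpu_context, simulated_task_tick and the sample intervals are illustrative stand-ins, not the kernel code.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for struct perf_cpu_context; not the kernel type. */
struct cpu_context {
	const char *name;
	int jiffies_interval;	/* rotate every N ticks; 1 = every tick */
};

/* Mirrors the gate in perf_event_task_tick(): a context rotates when its
 * interval is 1 or the current jiffies count is a multiple of it. */
static void simulated_task_tick(unsigned long jiffies,
				struct cpu_context *ctxs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct cpu_context *c = &ctxs[i];

		if (c->jiffies_interval == 1 || !(jiffies % c->jiffies_interval))
			printf("tick %lu: rotate %s\n", jiffies, c->name);
	}
}

int main(void)
{
	struct cpu_context ctxs[] = {
		{ "every-tick-pmu", 1 },	/* rotates on every tick */
		{ "slow-pmu",       4 },	/* rotates every 4th tick */
	};

	for (unsigned long j = 1; j <= 8; j++)
		simulated_task_tick(j, ctxs, sizeof(ctxs) / sizeof(ctxs[0]));

	return 0;
}

With an interval of 4 the second context only fires on ticks 4 and 8; that is the "slower rotations" knob the changelog mentions, while the default set in perf_pmu_register() below is 1, i.e. every tick.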
@@ -870,8 +870,8 @@ struct perf_cpu_context {
 	struct perf_event_context	*task_ctx;
 	int				active_oncpu;
 	int				exclusive;
-	u64				timer_interval;
-	struct hrtimer			timer;
+	struct list_head		rotation_list;
+	int				jiffies_interval;
 };
 
 struct perf_output_handle {
@@ -1065,6 +1065,7 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task)		{ }
@@ -1099,6 +1100,7 @@ static inline int perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)		{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
+static inline void perf_event_task_tick(void)			{ }
 #endif
 
 #define perf_output_put(handle, x) \
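The header hunks above follow the usual pattern for optional kernel subsystems: a real prototype when CONFIG_PERF_EVENTS is set, an empty static inline otherwise, so a caller like scheduler_tick() compiles without any #ifdef. A tiny sketch of the pattern, assuming a made-up FEATURE_X switch and do_thing() function:

#include <stdio.h>

#define FEATURE_X 1	/* flip to 0 to compile the empty stub instead */

#if FEATURE_X
void do_thing(void) { printf("doing the thing\n"); }	/* real version */
#else
static inline void do_thing(void) { }	/* compiles away entirely */
#endif

int main(void)
{
	do_thing();	/* the call site is identical either way */
	return 0;
}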
@@ -77,23 +77,22 @@ void perf_pmu_enable(struct pmu *pmu)
 		pmu->pmu_enable(pmu);
 }
 
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+	struct list_head *head = &__get_cpu_var(rotation_list);
 
-	if (hrtimer_active(&cpuctx->timer))
-		return;
+	WARN_ON(!irqs_disabled());
 
-	__hrtimer_start_range_ns(&cpuctx->timer,
-			ns_to_ktime(cpuctx->timer_interval), 0,
-			HRTIMER_MODE_REL_PINNED, 0);
-}
-
-static void perf_pmu_rotate_stop(struct pmu *pmu)
-{
-	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-	hrtimer_cancel(&cpuctx->timer);
+	if (list_empty(&cpuctx->rotation_list))
+		list_add(&cpuctx->rotation_list, head);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1607,36 +1606,33 @@ static void rotate_ctx(struct perf_event_context *ctx)
 }
 
 /*
- * Cannot race with ->pmu_rotate_start() because this is ran from hardirq
- * context, and ->pmu_rotate_start() is called with irqs disabled (both are
- * cpu affine, so there are no SMP races).
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
  */
-static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-	enum hrtimer_restart restart = HRTIMER_NORESTART;
-	struct perf_cpu_context *cpuctx;
+	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0;
-
-	cpuctx = container_of(timer, struct perf_cpu_context, timer);
+	int rotate = 0, remove = 1;
 
 	if (cpuctx->ctx.nr_events) {
-		restart = HRTIMER_RESTART;
+		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
 	if (ctx && ctx->nr_events) {
-		restart = HRTIMER_RESTART;
+		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
 	}
 
 	perf_pmu_disable(cpuctx->ctx.pmu);
-	perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
+	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
 	if (ctx)
-		perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
+		perf_ctx_adjust_freq(ctx, interval);
 
 	if (!rotate)
 		goto done;
@@ -1654,10 +1650,24 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
 done:
+	if (remove)
+		list_del_init(&cpuctx->rotation_list);
+
 	perf_pmu_enable(cpuctx->ctx.pmu);
-	hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
+}
+
+void perf_event_task_tick(void)
+{
+	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct perf_cpu_context *cpuctx, *tmp;
 
-	return restart;
+	WARN_ON(!irqs_disabled());
+
+	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+		if (cpuctx->jiffies_interval == 1 ||
+				!(jiffies % cpuctx->jiffies_interval))
+			perf_rotate_context(cpuctx);
+	}
 }
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -5186,9 +5196,8 @@ int perf_pmu_register(struct pmu *pmu)
 		__perf_event_init_context(&cpuctx->ctx);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
-		cpuctx->timer_interval = TICK_NSEC;
-		hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		cpuctx->timer.function = perf_event_context_tick;
+		cpuctx->jiffies_interval = 1;
+		INIT_LIST_HEAD(&cpuctx->rotation_list);
 	}
 
 got_cpu_context:
@@ -6229,6 +6238,7 @@ static void __init perf_event_init_all_cpus(void)
 	for_each_possible_cpu(cpu) {
 		swhash = &per_cpu(swevent_htable, cpu);
 		mutex_init(&swhash->hlist_mutex);
+		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
 	}
 }
 
@@ -6248,6 +6258,15 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void perf_pmu_rotate_stop(struct pmu *pmu)
+{
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+	WARN_ON(!irqs_disabled());
+
+	list_del_init(&cpuctx->rotation_list);
+}
+
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
@@ -3584,6 +3584,8 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
+	perf_event_task_tick();
+
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
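One subtlety in perf_rotate_context() above: removal from the rotation list is lazy (it happens only once a context has no events left), and list_del_init() leaves the node self-linked, so the list_empty() check in perf_pmu_rotate_start() can safely re-add it later. A self-contained user-space sketch of that add/remove cycle; struct node and the helpers are simplified stand-ins for the kernel's <linux/list.h> API:

#include <stdio.h>
#include <stdbool.h>

/* Simplified circular doubly-linked list node. */
struct node {
	struct node *prev, *next;
};

static void list_init(struct node *n)        { n->prev = n->next = n; }
static bool list_empty(const struct node *n) { return n->next == n; }

static void list_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);	/* self-linked again, so list_empty(n) holds */
}

int main(void)
{
	struct node head, ctx;

	list_init(&head);
	list_init(&ctx);

	/* perf_pmu_rotate_start(): enqueue only if not already queued. */
	if (list_empty(&ctx))
		list_add(&ctx, &head);
	printf("queued: %s\n", list_empty(&head) ? "no" : "yes");

	/* perf_rotate_context() with no events left: lazy removal. */
	list_del_init(&ctx);
	printf("queued: %s\n", list_empty(&head) ? "no" : "yes");

	/* The node is self-linked, so it can be re-added safely. */
	if (list_empty(&ctx))
		list_add(&ctx, &head);
	printf("queued: %s\n", list_empty(&head) ? "no" : "yes");

	return 0;
}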