Commit 49f47433 · Author: Peter Zijlstra · Committer: Ingo Molnar

perf events: Remove arg from perf sched hooks

Since we only ever schedule the local cpu, there is no need to pass the
cpu number to the perf sched hooks.

This micro-optimizes things a bit.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 4cf40131
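The pattern the diff applies throughout is the switch from per_cpu(var, cpu), which indexes an arbitrary CPU's instance of a per-CPU variable by number, to __get_cpu_var(var), which resolves directly to the instance of the CPU the code is currently running on. A minimal sketch of the before/after shape follows; the hook names sched_hook_old/sched_hook_new are illustrative only, the real functions are the perf_event_task_sched_in/out and perf_event_task_tick hooks changed in the hunks below.

/*
 * Sketch only: illustrates the per_cpu() -> __get_cpu_var() change.
 * sched_hook_old/sched_hook_new are hypothetical names for this
 * example, not kernel functions.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

/* Before: every caller had to thread the cpu number through. */
static void sched_hook_old(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	/* ... operate on cpuctx ... */
}

/*
 * After: the hook is only ever invoked for the local cpu, from
 * scheduler paths that run with preemption disabled, so the local
 * per-cpu instance can be resolved in place.
 */
static void sched_hook_new(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	/* ... operate on cpuctx ... */
}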
@@ -746,10 +746,10 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task,
-				      struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+				      struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -870,12 +870,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
...
@@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
  * not restart the event.
  */
 void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next, int cpu)
+			       struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
@@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
 
 static void
 __perf_event_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx, int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
+	int cpu = smp_processor_id();
 	struct perf_event *event;
 	int can_add_hw = 1;
 
@@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 
 	if (likely(!ctx))
 		return;
 	if (cpuctx->task_ctx == ctx)
 		return;
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 	cpuctx->task_ctx = ctx;
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1461,7 +1462,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
@@ -1469,7 +1470,7 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (!atomic_read(&nr_events))
 		return;
 
-	cpuctx = &per_cpu(perf_cpu_context, cpu);
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
@@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx, cpu);
+	perf_event_cpu_sched_in(cpuctx);
 	if (ctx)
-		perf_event_task_sched_in(curr, cpu);
+		perf_event_task_sched_in(curr);
 }
 
 /*
@@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	raw_spin_unlock(&ctx->lock);
 
-	perf_event_task_sched_in(task, smp_processor_id());
+	perf_event_task_sched_in(task);
 out:
 	local_irq_restore(flags);
 }
...
@@ -2752,7 +2752,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current);
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5266,7 +5266,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5480,7 +5480,7 @@ asmlinkage void __sched schedule(void)
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;
...
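One compensating hunk is worth noting: __perf_event_sched_in() loses its cpu parameter but still needs the cpu id internally, so it now derives it with smp_processor_id(). That is valid only because every path into these hooks (context switch, scheduler tick, enable-on-exec with interrupts disabled) runs on the cpu in question with preemption off, which is exactly the invariant the commit message relies on.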