Commit 82cd6def authored by Peter Zijlstra, committed by Ingo Molnar

perf: Use jump_labels to optimize the scheduler hooks

Trades a call + conditional + ret for an unconditional jmp.
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101014203625.501657727@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 8b92538d
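To illustrate the pattern the patch below introduces (the real hooks are renamed to __perf_event_task_sched_in/out and hidden behind static-inline wrappers guarded by a jump label), here is a minimal sketch using the same 2010-era <linux/jump_label_ref.h> interface that the diff relies on (JUMP_LABEL(), jump_label_inc(), jump_label_dec()). It is not part of the commit, and the my_hook_* names are hypothetical:

/*
 * Sketch only: same shape as the perf_event_task_sched_in/out wrappers
 * in the diff, with hypothetical my_hook_* names.  Assumes the 2010-era
 * jump_label_ref API used by this patch.
 */
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>

extern void __my_hook(int arg);		/* out-of-line slow path */
extern atomic_t my_hook_users;		/* key: non-zero => hook enabled */

static inline void my_hook(int arg)
{
	/*
	 * With arch jump-label support this compiles to a patchable no-op
	 * in the fast path; enabling the key rewrites it into a jmp to
	 * have_users.  Without support it degrades to a test of the atomic
	 * key.  Either way the common case avoids call + conditional + ret.
	 */
	JUMP_LABEL(&my_hook_users, have_users);
	return;

have_users:
	__my_hook(arg);
}

/* Enable/disable sites reference-count the key: */
static inline void my_hook_register(void)
{
	jump_label_inc(&my_hook_users);	/* 0 -> 1 patches the branch in */
}

static inline void my_hook_unregister(void)
{
	jump_label_dec(&my_hook_users);	/* 1 -> 0 patches it back out */
}

In the patch itself the key is perf_task_events: perf_event_alloc() and free_event() bump it only for task-attached events, so when no per-task events exist the scheduler pays just a patched no-op in perf_event_task_sched_in/out.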
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -487,6 +487,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
@@ -895,8 +896,30 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_in(task);
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_out(task, next);
+}
+
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
@@ -1337,14 +1337,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	cpuctx->task_ctx = NULL;
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-	task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */
@@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event)
 	irq_work_sync(&event->pending);
 
 	if (!event->parent) {
-		atomic_dec(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_dec(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -5354,7 +5347,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	event->pmu = pmu;
 
 	if (!event->parent) {
-		atomic_inc(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_inc(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -5849,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * our context.
 	 */
 	child_ctx = child->perf_event_ctxp[ctxn];
-	__perf_event_task_sched_out(child_ctx);
+	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
 	 * Take the context lock here so that if find_get_context is