From 9717e6cd3db22eade7dbae0fc9235c66325a7132 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Thu, 28 Jan 2010 13:57:44 +0100
Subject: [PATCH] perf_events: Optimize perf_event_task_tick()

Pretty much all of the calls made from perf_event_task_tick() do their own
perf_disable()/perf_enable() cycle; pull that out into a single
disable/enable pair around the whole tick to cut back on hardware
programming.
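
A minimal user-space sketch of the pattern, for illustration only (the
names hw_disable()/hw_enable() and the helpers are hypothetical stand-ins
for perf_disable()/perf_enable() and the tick helpers; this is not kernel
code). Batching means the hardware is reprogrammed once per tick rather
than once per helper call:

#include <stdio.h>

static int hw_reprogram_count;		/* counts expensive PMU writes */

static void hw_disable(void) { }
static void hw_enable(void)  { hw_reprogram_count++; }

/* Before: every helper brackets itself with disable/enable. */
static void helper_unbatched(void)
{
	hw_disable();
	/* ... adjust frequency, rotate groups, reschedule events ... */
	hw_enable();
}

/* After: helpers assume the caller already disabled the PMU. */
static void helper_batched(void)
{
	/* ... same work, no per-call disable/enable ... */
}

static void tick_unbatched(void)
{
	helper_unbatched();
	helper_unbatched();
	helper_unbatched();
	helper_unbatched();
}

static void tick_batched(void)
{
	hw_disable();
	helper_batched();
	helper_batched();
	helper_batched();
	helper_batched();
	hw_enable();
}

int main(void)
{
	hw_reprogram_count = 0;
	tick_unbatched();
	printf("unbatched tick: %d hardware reprograms\n", hw_reprogram_count);

	hw_reprogram_count = 0;
	tick_batched();
	printf("batched tick:   %d hardware reprograms\n", hw_reprogram_count);
	return 0;
}

Run as written this prints 4 reprograms for the unbatched tick and 1 for
the batched one, which is the effect the patch below has on the real PMU.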

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 kernel/perf_event.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40f8b07c5601..087025fe3ba1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1573,12 +1573,8 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
-	perf_disable();
-
 	list_rotate_left(&ctx->flexible_groups);
 
-	perf_enable();
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1593,6 +1589,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
+	perf_disable();
+
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
@@ -1608,6 +1606,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+
+	perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
-- 
GitLab