/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

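/*
 * wakeup_lock serializes wakeup_task, wakeup_cpu and wakeup_prio;
 * it is always taken with interrupts disabled.
 */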
static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

#ifdef CONFIG_FTRACE
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;

	if (likely(!wakeup_task))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	/*
	 * To prevent recursion from the scheduler, if the
	 * resched flag was set before we entered, then
	 * don't reschedule.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

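/*
 * Called at every context switch. When the task we are waiting for
 * is scheduled in, compute the wakeup-to-schedule latency, record a
 * new max trace if report_latency() accepts it, and re-arm by
 * resetting the trace state.
 */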
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

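/*
 * Reset the per-cpu buffers and forget the task being traced,
 * dropping our reference to it. Callers must hold wakeup_lock.
 */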
static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(tr, cpu);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

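/*
 * Locked wrapper around __wakeup_reset(): disables interrupts and
 * takes wakeup_lock for the duration of the reset.
 */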
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

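/*
 * Called on every wakeup. Start timing a new trace only for an RT
 * task with a higher priority (lower prio value) than both the task
 * we are already waiting for and the current task.
 */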
static void
probe_wakeup(struct rq *rq, struct task_struct *p)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

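/*
 * Attach probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unwinding the earlier registrations if a
 * later one fails, then enable the tracer.
 */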
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	tracer_enabled = 1;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

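/*
 * Disable the tracer first, then unhook everything in the reverse
 * order of start_wakeup_tracer().
 */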
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}

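/*
 * print_max makes the trace output read from the snapshot saved by
 * update_max_tr(), i.e. the worst wakeup latency seen so far.
 */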
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);