/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
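
/*
 * Usage (a sketch; paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */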
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

static int save_lat_flag;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);
 out_enable:
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

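/*
 * The task being traced can migrate between wakeup and schedule-in;
 * track its current CPU so wakeup_tracer_call() only records function
 * entries on that CPU.
 */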
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

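/*
 * Runs on every context switch: once the task we are waiting for is
 * scheduled in, compute the wakeup-to-scheduled latency and record it
 * if it is a new maximum.
 */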
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

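/*
 * Drop the reference on the tracked task and clear the tracking state.
 * Callers must hold wakeup_lock.
 */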
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

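/*
 * Clear the trace buffers and the tracked-task state; wakeup_lock is
 * taken with IRQs off to exclude the probe callbacks while we reset.
 */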
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

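/*
 * Runs on the sched_wakeup/sched_wakeup_new tracepoints: if the woken
 * task outranks both current and any wakeup already being traced,
 * restart the measurement with this task and timestamp the wakeup.
 */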
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is),
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

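/*
 * Register the tracepoint probes and the function tracer, then flip
 * tracer_enabled last so no probe does any work before setup is done.
 */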
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

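/*
 * Clear tracer_enabled first so the probes bail out, then unregister
 * the function tracer and the tracepoint probes.
 */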
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

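/*
 * Setup shared by both tracer flavors: force the latency output
 * format, clear the recorded maximum, and start tracing.
 */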
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

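/*
 * Runtime start/stop hooks invoked by the tracing core (in this era,
 * I believe via the tracing_enabled control file).
 */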
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
};

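/*
 * Boot-time registration; both flavors then appear in the
 * available_tracers list.
 */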
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);