/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

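/*
 * wakeup_lock protects wakeup_task, wakeup_cpu and wakeup_prio, and is
 * always taken with interrupts disabled. A raw spinlock is used here,
 * presumably so that lockdep and other instrumented locking code cannot
 * recurse back into the tracer.
 */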
static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
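	/*
	 * ftrace_preempt_disable() disables preemption while remembering
	 * whether NEED_RESCHED was already set, so that the matching
	 * ftrace_preempt_enable() below will not reschedule from inside
	 * the function-trace path.
	 */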
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	ftrace_preempt_enable(resched);
}

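/*
 * Registering this ftrace_ops (done in start_wakeup_tracer() below)
 * points the function tracer at wakeup_tracer_call(), so it runs for
 * every traced kernel function while the wakeup tracer is active.
 */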
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 *
 * If tracing_thresh is set, report every latency above the threshold;
 * otherwise report only a new maximum.
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 paired
	 * with a stale wakeup_task, which might happen to be the
	 * same task as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

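/*
 * __wakeup_reset() assumes the caller already holds wakeup_lock with
 * interrupts disabled; every caller in this file takes the lock first.
 */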
static void __wakeup_reset(struct trace_array *tr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		tracing_reset(tr, cpu);

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

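/*
 * Arm the tracer on a qualifying wakeup: record which task we are
 * waiting for, stamp the time, and let probe_wakeup_sched_switch()
 * above measure the latency once that task is actually scheduled in.
 */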
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}

static int __wakeup_tracer_init(struct trace_array *tr)
{
	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);
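
/*
 * Usage sketch: a minimal sequence for exercising this tracer through
 * the tracing debugfs interface, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   # echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *   ... run an RT workload ...
 *   # cat /sys/kernel/debug/tracing/tracing_max_latency
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * tracing_max_latency then reports the worst-observed wakeup latency
 * in microseconds, and "trace" shows the events recorded for it.
 */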