/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
M
Mathieu Desnoyers 已提交
19
static atomic_t			sched_ref;
20

I
Ingo Molnar 已提交
21
static void
M
Mathieu Desnoyers 已提交
22 23
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
			struct task_struct *next)
24
{
M
Mathieu Desnoyers 已提交
25 26
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
27 28 29 30 31 32 33 34
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

35
	local_irq_save(flags);
36 37 38 39
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

I
Ingo Molnar 已提交
40
	if (likely(disabled == 1))
41 42 43
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
44
	local_irq_restore(flags);
45 46
}

M
Mathieu Desnoyers 已提交
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}

75
static void
M
Mathieu Desnoyers 已提交
76 77
wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
			task_struct *curr)
78
{
M
Mathieu Desnoyers 已提交
79 80
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
81 82 83 84 85 86 87 88
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

I
Ingo Molnar 已提交
89 90
	tracing_record_cmdline(curr);

91 92 93 94 95
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

I
Ingo Molnar 已提交
96
	if (likely(disabled == 1))
97 98 99 100 101 102
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

M
Mathieu Desnoyers 已提交
103 104 105
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
106
{
M
Mathieu Desnoyers 已提交
107 108 109
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;
110

M
Mathieu Desnoyers 已提交
111 112
	if (likely(!tracer_enabled))
		return;
113

M
Mathieu Desnoyers 已提交
114 115 116 117 118 119 120
	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));
121

M
Mathieu Desnoyers 已提交
122 123
	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);
124

M
Mathieu Desnoyers 已提交
125
	wakeup_func(probe_data, __rq, task, curr);
126 127
}

I
Ingo Molnar 已提交
128
static void sched_switch_reset(struct trace_array *tr)
129 130 131
{
	int cpu;

I
Ingo Molnar 已提交
132
	tr->time_start = ftrace_now(tr->cpu);
133 134 135 136 137

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

M
Mathieu Desnoyers 已提交
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		sched_switch_callback,
		&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}

/*
 * tracing_sched_unregister - detach all three scheduler marker probes,
 * in the reverse order of tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}

/*
 * tracing_start_sched_switch - add a user of the sched-switch probes.
 *
 * The marker probes are registered when the first user arrives;
 * subsequent users only bump the reference count.
 */
void tracing_start_sched_switch(void)
{
	if (atomic_inc_return(&sched_ref) == 1)
		tracing_sched_register();
}

/*
 * tracing_stop_sched_switch - drop a user of the sched-switch probes.
 *
 * When the last user goes away the marker probes are torn down
 * (atomic_dec_and_test() is true only when the count reaches zero).
 */
void tracing_stop_sched_switch(void)
{
	if (atomic_dec_and_test(&sched_ref))
		tracing_sched_unregister();
}

I
Ingo Molnar 已提交
216
static void start_sched_trace(struct trace_array *tr)
217 218
{
	sched_switch_reset(tr);
219
	atomic_inc(&trace_record_cmdline_enabled);
220
	tracer_enabled = 1;
M
Mathieu Desnoyers 已提交
221
	tracing_start_sched_switch();
222 223
}

I
Ingo Molnar 已提交
224
static void stop_sched_trace(struct trace_array *tr)
225
{
M
Mathieu Desnoyers 已提交
226
	tracing_stop_sched_switch();
227
	atomic_dec(&trace_record_cmdline_enabled);
228 229 230
	tracer_enabled = 0;
}

I
Ingo Molnar 已提交
231
static void sched_switch_trace_init(struct trace_array *tr)
232 233 234 235 236 237 238
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

I
Ingo Molnar 已提交
239
static void sched_switch_trace_reset(struct trace_array *tr)
240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

/*
 * sched_switch_trace_ctrl_update - tracer ->ctrl_update hook, called
 * when the user toggles tracing on or off. Starting a new trace also
 * resets the buffers (via start_sched_trace).
 */
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	if (!tr->ctrl)
		stop_sched_trace(tr);
	else
		start_sched_trace(tr);
}

/* The "sched_switch" tracer plugin registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
M
Mathieu Desnoyers 已提交
267 268 269 270 271 272 273 274
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
275 276 277
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);