/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;

I
Ingo Molnar 已提交
20
static void
21
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
22 23 24 25 26 27 28 29 30 31
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

32
	local_irq_save(flags);
33 34 35 36
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

I
Ingo Molnar 已提交
37
	if (likely(disabled == 1))
38 39 40
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
41
	local_irq_restore(flags);
42 43
}

44 45
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
46 47 48 49 50 51 52 53 54 55
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

I
Ingo Molnar 已提交
56 57
	tracing_record_cmdline(curr);

58 59 60 61 62
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

I
Ingo Molnar 已提交
63
	if (likely(disabled == 1))
64 65 66 67 68 69
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

70 71 72
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
73
{
74 75 76
	if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
		tracing_record_cmdline(prev);

77 78 79 80
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
81
	ctx_switch_func(__rq, prev, next);
82 83 84 85 86 87 88

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}

/*
 * Scheduler wakeup hook: record the wakeup in this tracer and chain
 * the event to the wakeup-latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}

/*
 * Record a user-triggered "special" trace entry carrying three
 * arbitrary values, using the same irq-off + per-CPU ->disabled
 * recursion guard as the switch/wakeup handlers above.
 */
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace if we are not already nested on this CPU: */
	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

I
Ingo Molnar 已提交
125
static void sched_switch_reset(struct trace_array *tr)
126 127 128
{
	int cpu;

I
Ingo Molnar 已提交
129
	tr->time_start = ftrace_now(tr->cpu);
130 131 132 133 134

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

I
Ingo Molnar 已提交
135
static void start_sched_trace(struct trace_array *tr)
136 137
{
	sched_switch_reset(tr);
138
	atomic_inc(&trace_record_cmdline_enabled);
139 140 141
	tracer_enabled = 1;
}

I
Ingo Molnar 已提交
142
static void stop_sched_trace(struct trace_array *tr)
143
{
144
	atomic_dec(&trace_record_cmdline_enabled);
145 146 147
	tracer_enabled = 0;
}

I
Ingo Molnar 已提交
148
static void sched_switch_trace_init(struct trace_array *tr)
149 150 151 152 153 154 155
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

I
Ingo Molnar 已提交
156
static void sched_switch_trace_reset(struct trace_array *tr)
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

/*
 * Tracer ->ctrl_update callback: start or stop tracing according to
 * the new value of tr->ctrl.
 */
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

/*
 * The "sched_switch" tracer plugin descriptor registered with the
 * tracing core.
 */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};

/*
 * Register the sched_switch tracer with the tracing core at device
 * initcall time.
 */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);