/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

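/*
 * Point the tracer at the given trace array, clear its per-cpu buffers
 * and start cmdline recording and the function entry callback.
 */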
static void start_function_trace(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}

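/* tracer->start callback: clear the per-cpu ring buffers */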
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

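/*
 * Function entry callback used when the "preempt only" trace option is
 * set: preemption (rather than interrupts) is disabled while the event
 * is recorded, and a per-cpu counter guards against recursion.
 */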
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

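/*
 * Default function entry callback: interrupts are disabled around the
 * per-cpu recursion guard and the ring buffer write.
 */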
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

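/*
 * Callback used when the func_stack_trace option is set: records the
 * function entry plus a stack trace of the call site.
 */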
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}


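/* Callback registered for plain function tracing */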
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

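/* Callback registered when per-function stack traces are requested */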
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

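/*
 * Register the ftrace callback that matches the current trace options.
 * ftrace_function_enabled stays clear until registration is done, so
 * the callbacks bail out early in the meantime.
 */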
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

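/* Disable the callbacks and unregister whichever variant was in use */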
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

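/*
 * Toggling func_stack_trace at runtime swaps the registered callback
 * between the plain and the stack-tracing variant.
 */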
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

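/* The "function" tracer, registered with the tracing core at boot */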
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);