/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
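
/*
 * A note on the initializer above: "[0 ... (STACK_TRACE_ENTRIES)] =
 * ULONG_MAX" is a GCC designated-initializer range that fills the
 * whole array with ULONG_MAX. The readers below (__next() and
 * t_show()) rely on ULONG_MAX as the end-of-trace sentinel for slots
 * that were never written.
 */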

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
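
/*
 * A raw arch_spinlock_t is used above rather than a normal spinlock;
 * a likely reason (an inference, not stated in this file) is that the
 * ordinary spinlock paths are themselves traceable and could recurse
 * back into the stack tracer, so the lowest-level primitive is taken
 * with interrupts disabled manually around it.
 */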

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
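
	/*
	 * Worked example with assumed values (not from this file): if
	 * THREAD_SIZE is 8192 and 'stack' sits at byte offset 0x1e00
	 * (7680) within the thread's stack area, the two lines above
	 * give 8192 - 7680 = 512 bytes in use, since the stack grows
	 * down from the top of that area.
	 */
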
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;
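	/*
	 * A guess at the intent (not stated here): skip = 3 drops the
	 * innermost frames from the saved trace -- roughly
	 * save_stack_trace() itself and the tracer's own callers --
	 * so the stack tracer does not pollute its own report.
	 */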

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may for
	 * whatever reason be missing from the stack, so we have
	 * to account for them. If they are all there, this loop
	 * will only happen once. This code only runs on a new
	 * max, so it is far from a fast path.
	 */
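	/*
	 * Illustrative sketch (hypothetical function names): if the
	 * saved trace is foo, bar, baz, the inner loop below scans the
	 * stack words from 'start' to 'top' for foo's return address
	 * first, resumes just past that slot for bar's, and so on; each
	 * hit records (top - p) * sizeof(unsigned long), the depth of
	 * the matching slot below the top of the stack.
	 */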
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);
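
	/*
	 * Note: 'stack' itself is never written; passing &stack simply
	 * hands check_stack() an address inside the current stack
	 * frame to use as its starting point.
	 */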

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
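
/*
 * A note on FTRACE_OPS_FL_RECURSION_SAFE above: it signals that this
 * callback provides its own recursion protection (the per-cpu
 * trace_active counter), so the ftrace core can skip its generic
 * recursion guard for it.
 */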

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we traced inside arch_spin_lock(), or from an NMI that
	 * fires after it is taken, we would deadlock on this lock,
	 * so we also need to increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
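
/*
 * "%pS" above is the kernel's symbolic pointer format: the address is
 * printed as symbol+offset (e.g. "vfs_read+0x10/0x130"), which gives
 * the Location column human-readable names.
 */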

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];
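
	/*
	 * Example with made-up numbers: if stack_dump_index holds
	 * { 512, 360, 104 }, the Size column prints 152, 256 and 104.
	 * Each frame's size is its own depth minus its caller's depth;
	 * the last (outermost) entry reports its full depth.
	 */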

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
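
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 *
 * restricts the stack-size checks to the matched functions via the
 * standard ftrace filter machinery wired up above.
 */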

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
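
/*
 * Boot-time usage implied by the handlers above (examples assumed):
 *
 *   stacktrace                - enable the stack tracer at boot
 *   stacktrace_filter=spin_*  - enable it, filtered to spin_*
 *
 * __setup("stacktrace", ...) matches by prefix, so for the second
 * form enable_stacktrace() receives the "_filter=..." remainder
 * in 'str'.
 */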

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);