trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

/*
 * Measure how much of the current thread's stack is in use and, if it
 * is deeper than any stack seen so far, record the backtrace that got
 * us here as the new maximum.
 */
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

/*
 * ftrace callback, invoked on entry to every traced function. The
 * per-cpu trace_active count guards against recursing into ourselves
 * before we measure the current stack usage.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

/* debugfs read handler: report the deepest stack usage seen so far, in bytes */
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

/* debugfs write handler: set max_stack_size by hand (e.g. write 0 to re-arm) */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

/*
 * seq_file iterator for the stack_trace file: step through the saved
 * backtrace one entry at a time.
 */
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

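/*
 * Hold max_stack_lock across the whole seq_file walk. The per-cpu
 * trace_active count is raised first so the stack tracer callback
 * cannot deadlock on the same lock; t_stop() undoes both.
 */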
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

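/* Resolve one saved text address to a symbol and print it. */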
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

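/*
 * Print one line of the stack_trace output: entry index, remaining
 * stack depth at that frame, the size the frame consumes, and its
 * location. The SEQ_START_TOKEN pass prints the table header instead.
 */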
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

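/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback when the value actually changes.
 */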
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

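/*
 * "stacktrace" kernel command line option, optionally followed by
 * "_filter=<functions>" to restrict which functions are traced.
 */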
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

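/*
 * Create the debugfs control files and, if the tracer was enabled on
 * the command line, start tracing right away.
 */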
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);