/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

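/*
 * Snapshot of the deepest stack usage seen so far.  stack_dump_trace[]
 * holds the return addresses of the worst-case call chain (terminated by
 * ULONG_MAX), and stack_dump_index[i] records how many bytes of stack
 * were in use at the point entry i was found.
 */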
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

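/*
 * stack_trace_disabled is a global kill switch for the callback.
 * trace_active is a per-cpu recursion guard: it is non-zero while that
 * CPU is already inside the stack check or holding max_stack_lock, so
 * the traced functions called from there do not re-enter the tracer.
 */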
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

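/*
 * Measure how much of the current thread stack is in use (THREAD_SIZE
 * minus the offset of a local variable within the stack).  If this beats
 * the recorded maximum, save a new stack trace and then walk the stack
 * words to compute, for each saved return address, how deep the stack
 * was at that frame (stored in stack_dump_index[]).  Called from the
 * function tracer callback with preemption disabled.
 */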
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may for some reason
	 * not be found on the stack, so we may have to account for
	 * that. If every entry is found, this loop only runs once.
	 * This code only executes on a new max, so it is far from a
	 * fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

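/*
 * The ftrace callback: runs at the entry of every traced function.  The
 * per-cpu trace_active counter stops it from recursing into itself via
 * the functions that check_stack() calls, and preemption stays disabled
 * so the counter is consistent for this CPU.
 */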
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

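/*
 * "stack_max_size" in the tracing debugfs directory: reading returns the
 * deepest stack usage recorded (in bytes); writing stores a new value,
 * so writing 0 effectively re-arms the tracer to record a fresh maximum.
 */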
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after it (NMI),
	 * we would cause a circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

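/*
 * seq_file iterator for the "stack_trace" file.  Position 0 produces the
 * header (SEQ_START_TOKEN); position n + 1 maps to entry n of
 * stack_dump_trace[].  t_start() bumps the per-cpu trace_active count
 * and takes max_stack_lock so the snapshot cannot change (and the tracer
 * cannot recurse) while it is being printed; t_stop() undoes both.
 */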
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

L
Li Zefan 已提交
215 216
static void *t_start(struct seq_file *m, loff_t *pos)
{
217 218
	int cpu;

S
Steven Rostedt 已提交
219
	local_irq_disable();
220 221 222 223

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

224
	arch_spin_lock(&max_stack_lock);
S
Steven Rostedt 已提交
225

226 227 228
	if (*pos == 0)
		return SEQ_START_TOKEN;

L
Li Zefan 已提交
229
	return __next(m, pos);
S
Steven Rostedt 已提交
230 231 232 233
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

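/* Print one saved return address as symbol+offset ("%pS"). */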
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

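/*
 * Shown under the "stack_trace" header when the tracer is off and no
 * maximum has been recorded, so an empty file explains how to enable it.
 */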
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

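/*
 * Print one line of "stack_trace".  "Depth" is how many bytes of stack
 * were in use at this entry's frame; "Size" is the difference from the
 * next (shallower) entry, i.e. roughly the stack consumed by this
 * function itself.
 */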
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

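/*
 * "stack_trace_filter" reuses the ftrace regex interface, so the set of
 * functions the stack tracer hooks can be narrowed exactly like
 * set_ftrace_filter (write a function name or glob into the file).
 */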
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

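/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.  On a value change
 * it registers or unregisters trace_ops with ftrace; the mutex keeps
 * concurrent writers from racing on that registration state.
 */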
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

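/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer before user space is up, and "stacktrace_filter=<funcs>" stashes
 * a filter string that stack_trace_init() applies below.
 */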
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

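/*
 * Create the control files in the tracing directory (typically
 * /sys/kernel/debug/tracing):
 *
 *   stack_max_size     - deepest stack usage recorded, in bytes
 *   stack_trace        - the call chain that produced that maximum
 *   stack_trace_filter - limit which functions are checked
 *
 * A typical session (a sketch; paths assume debugfs mounted as usual):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */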
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);