/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

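/* Names for the exception stacks, indexed by their IST slot (see orig_ist). */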
static char *exception_stack_names[N_EXCEPTION_STACKS] = {
		[ DOUBLEFAULT_STACK-1	]	= "#DF",
		[ NMI_STACK-1		]	= "NMI",
		[ DEBUG_STACK-1		]	= "#DB",
		[ MCE_STACK-1		]	= "#MC",
};

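/* Every exception stack is EXCEPTION_STKSZ bytes, except the larger #DB stack. */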
static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
	[0 ... N_EXCEPTION_STACKS - 1]		= EXCEPTION_STKSZ,
	[DEBUG_STACK - 1]			= DEBUG_STKSZ
};

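/*
 * If @stack lies within one of this CPU's exception stacks, return a
 * pointer to the end of that stack and report its name through @idp.
 * The @usedp bitmap records stacks we have already visited, so a
 * corrupted dump cannot bounce between them forever.  Returns NULL
 * when @stack is not on any exception stack.
 */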
static unsigned long *in_exception_stack(unsigned long stack, unsigned *usedp,
					 char **idp)
{
	unsigned long begin, end;
	unsigned k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);

	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		end   = raw_cpu_ptr(&orig_ist)->ist[k];
		begin = end - exception_stack_sizes[k];

		if (stack < begin || stack >= end)
			continue;

		/*
		 * Make sure we only iterate through an exception stack once.
		 * If it comes up for the second time then there's something
		 * wrong going on - just break and return NULL:
		 */
		if (*usedp & (1U << k))
			break;
		*usedp |= 1U << k;

		*idp = exception_stack_names[k];
		return (unsigned long *)end;
	}

	return NULL;
}

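/* Is @stack inside the per-CPU interrupt stack [irq_stack, irq_stack_end)? */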
static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

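/* Which stack a given address was found on, as reported by analyze_stack(). */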
enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

static enum stack_type
analyze_stack(struct task_struct *task, unsigned long *stack,
	      unsigned long **stack_end, unsigned long *irq_stack,
	      unsigned *used, char **id)
{
	unsigned long addr;

	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
	if ((unsigned long)task_stack_page(task) == addr)
		return STACK_IS_NORMAL;

	*stack_end = in_exception_stack((unsigned long)stack, used, id);
	if (*stack_end)
		return STACK_IS_EXCEPTION;

	if (!irq_stack)
		return STACK_IS_NORMAL;

	*stack_end = irq_stack;
	irq_stack -= (IRQ_STACK_SIZE / sizeof(long));

	if (in_irq_stack(stack, irq_stack, *stack_end))
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

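/*
 * The walk below is driven entirely by the caller's stacktrace_ops:
 * ops->stack() is told (and may veto, by returning < 0) whenever we cross
 * onto a different stack, and ops->walk_stack() prints the entries of one
 * stack and hands back the updated frame pointer.  As a rough sketch only
 * (my_ops is hypothetical and not part of this file):
 *
 *	static const struct stacktrace_ops my_ops = { ... };
 *	dump_trace(current, NULL, NULL, 0, &my_ops, NULL);
 *
 * The NULL/0 arguments fall back to the current task, its stack pointer
 * and its frame pointer via the defaults at the top of the function.
 */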
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	unsigned long *irq_stack = (unsigned long *)this_cpu_read(irq_stack_ptr);
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	task = task ? : current;
	stack = stack ? : get_stack_pointer(task, regs);
	bp = bp ? : (unsigned long)get_frame_pointer(task, regs);

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, each stack is unwound in turn by following the
	 * link it keeps to the stack below it.
	 */
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(task, stack, &stack_end, irq_stack, &used,
				      &id);

		/* Default finish unless specified to continue */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(task, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "EOE");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(task, stack, bp,
				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) via the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
}
EXPORT_SYMBOL(dump_trace);

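/*
 * Dump the raw stack words starting at @sp (at most kstack_depth_to_print
 * of them), following the link out of the IRQ stack back to the process
 * stack when we fall off its end, then print the decoded call trace.
 */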
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int i;

	irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
	irq_stack     = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));

	sp = sp ? : get_stack_pointer(task, regs);

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		unsigned long word;

		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (kstack_end(stack))
				break;
		}

		if (probe_kernel_address(stack, word))
			break;

		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %016lx", log_lvl, word);
		} else
			pr_cont(" %016lx", word);

		stack++;
		touch_nmi_watchdog();
	}

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

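/*
 * Full register dump.  For faults taken in kernel mode we additionally
 * print the raw stack and the code bytes surrounding the faulting RIP.
 */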
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault.
	 */
	if (!user_mode(regs)) {
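		/* Show roughly two thirds (43/64) of the code window before RIP. */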
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, NULL, 0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

/*
 * BUG() plants a two-byte ud2 instruction (0x0f 0x0b); report whether the
 * bytes at @ip match it.
 */
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}