/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
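/*
 * Bitmask selecting which of the PANIC_PRINT_* actions above to run from
 * panic(); set through the "panic_print" kernel parameter (see the
 * core_param() calls at the end of this file).  E.g. panic_print=0x3
 * dumps both task and memory state (0x1 | 0x2).
 */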
unsigned long panic_print;

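/*
 * Other code hooks the panic path by registering on this chain; the
 * notifier data pointer is the formatted panic message.  A minimal
 * sketch of a caller (hypothetical names, not part of this file):
 *
 *	static int my_panic_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		const char *msg = data;	/+ panic message from panic() +/
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call = my_panic_cb,
 *	};
 *	...
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */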
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

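/* The CPU that is currently handling panic(), or PANIC_CPU_INVALID if none. */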
atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

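/* Dump the optional system state selected in the panic_print bitmask. */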
static void panic_print_sys_info(void)
{
	if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
		console_flush_on_panic(CONSOLE_REPLAY_ALL);

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * work in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Calling flush even twice is fine: it tries harder with a single online CPU */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you are not sure that kdump always works in every situation,
	 * "crash_kexec_post_notifiers" offers a chance to run the panic
	 * notifiers and dump the kmsg before kdump.
	 * Note: since some panic notifiers can make the crashed kernel
	 * more unstable, this can also increase the risk of kdump failing.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

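/*
 * Each entry gives the flag character printed when the taint bit is set,
 * the character printed when it is clear ('G' for the proprietary-module
 * bit, ' ' otherwise), and whether the taint can also be carried by an
 * individual module.
 */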
/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/sysctl/kernel.txt
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
					t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdebug_ok = false, but for
 * some notewortht-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
422
{
423
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
424
		pr_warn("Disabling lock debugging due to kernel taint\n");
425

A
Andi Kleen 已提交
426
	set_bit(flag, &tainted_mask);
427
}
L
Linus Torvalds 已提交
428
EXPORT_SYMBOL(add_taint);

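/* Busy-wait for the given number of milliseconds, touching the NMI watchdog as we spin. */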
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
	init_oops_id();
	pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

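/* Carries a printf format and va_list from the warn_slowpath_* wrappers into __warn(). */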
struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (args)
		pr_warn(CUT_HERE);

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}

	print_modules();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef WANT_WARN_ON_SLOWPATH
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
	       &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	pr_warn(CUT_HERE);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#else
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */
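/* Writing any value to <debugfs>/clear_warn_once re-arms every WARN*_ONCE site. */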

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err)
{
	WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
		err, (void *)instruction_pointer(regs),
		current->comm, task_pid_nr(current),
		from_kuid_munged(&init_user_ns, current_uid()),
		from_kuid_munged(&init_user_ns, current_euid()));
}
#endif

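/*
 * Expose the panic/oops tunables as core kernel parameters; the 0644 mode
 * also makes them writable at runtime through sysfs.
 */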
core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

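/* Handle the "oops=panic" boot option: promote every oops to a full panic. */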
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);