/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
unsigned long panic_print;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);
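
/*
 * Sketch (not part of this file) of how a driver hooks the panic path; the
 * callback runs via atomic_notifier_call_chain() in panic() with the
 * formatted message as @data, in atomic context, so it must not sleep.
 * "my_panic_nb" and "my_panic_cb" are hypothetical names.
 *
 *	static int my_panic_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		pr_emerg("panic notifier: %s\n", (const char *)data);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call = my_panic_cb,
 *	};
 *
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */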

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
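
/*
 * Illustrative assignment (hypothetical "my_led_set" helper): an LED or
 * keyboard driver can publish a blink routine that toggles an indicator
 * and returns roughly how many ms it spent, so the panic loop's delay
 * bookkeeping stays honest:
 *
 *	static long my_panic_blink(int state)
 *	{
 *		my_led_set(state);
 *		return 10;
 *	}
 *
 *	panic_blink = my_panic_blink;
 */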

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}
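
/*
 * An architecture override would typically replace the busy-wait with a
 * lower-power or crash-friendly halt, e.g. (hypothetical helper):
 *
 *	void panic_smp_self_stop(void)
 *	{
 *		while (1)
 *			my_arch_halt();
 *	}
 */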

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
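
/*
 * Typical call site, sketched after the style of an arch NMI handler
 * ("my_nmi_handler" and "my_fatal_hw_error" are hypothetical):
 *
 *	static void my_nmi_handler(struct pt_regs *regs)
 *	{
 *		if (my_fatal_hw_error())
 *			nmi_panic(regs, "NMI: fatal hardware error\n");
 *	}
 *
 * If this CPU already holds panic_cpu the call simply returns, letting the
 * NMI handler unwind instead of recursing into panic().
 */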

static void panic_print_sys_info(void)
{
	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}
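
/*
 * panic_print is a bitmask of the PANIC_PRINT_* flags above.  For example,
 * booting with:
 *
 *	panic_print=0x3
 *
 * sets PANIC_PRINT_TASK_INFO | PANIC_PRINT_MEM_INFO, so panic() also dumps
 * the task list and memory state (see core_param() near the end of this
 * file for the parameter hookup).
 */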

/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Call flush even twice. It tries harder with a single online CPU */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt that kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dump kmsg before kdump.
	 * Note: since some panic_notifiers can make the crashed kernel
	 * more unstable, it can increase the risk of kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic();

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;

	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
};
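
/*
 * Worked example: with TAINT_PROPRIETARY_MODULE, TAINT_WARN and
 * TAINT_OOT_MODULE set, print_tainted() below returns roughly
 *
 *	"Tainted: P        W  O"
 *
 * one character per flag: c_true when the bit is set, c_false (usually a
 * space) when it is clear.
 */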

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/sysctl/kernel.txt
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
					t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
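
/*
 * Example (illustrative): code that spots noteworthy-but-survivable trouble
 * can taint without disabling lockdep,
 *
 *	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 *
 * while code that has found real state corruption should pass
 * LOCKDEP_NOW_UNRELIABLE so lock debugging is turned off.
 */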

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}
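
/*
 * Sketch of the expected call sequence from an architecture's die()/oops
 * path ("my_arch_die" is a hypothetical name):
 *
 *	void my_arch_die(const char *str, struct pt_regs *regs, long err)
 *	{
 *		oops_enter();
 *		if (oops_may_print())
 *			show_regs(regs);
 *		oops_exit();
 *	}
 */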

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
	init_oops_id();
	pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (args)
		pr_warn(CUT_HERE);

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}

	print_modules();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef WANT_WARN_ON_SLOWPATH
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
	       &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	pr_warn(CUT_HERE);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#else
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif
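
/*
 * For orientation (a sketch, not a definition from this file): the WARN()
 * macros in include/asm-generic/bug.h funnel into the helpers above,
 * roughly as if
 *
 *	WARN(ret < 0, "device %d failed: %d\n", id, ret);
 *
 * expanded to a condition test followed by
 * warn_slowpath_fmt(__FILE__, __LINE__, "device %d failed: %d\n", id, ret)
 * on WANT_WARN_ON_SLOWPATH architectures, or __warn_printk() plus an
 * architecture-specific trap that eventually reaches __warn() otherwise.
 */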

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
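
/*
 * Usage: with debugfs mounted at /sys/kernel/debug, writing to this file
 * re-arms all *_ONCE sites, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/clear_warn_once
 *
 * after which previously-fired WARN_ONCE()/printk_once() sites trigger
 * again.
 */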
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
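
/*
 * Illustration only (hypothetical function): with -fstack-protector the
 * compiler places a canary between a frame's buffers and its return
 * address, and the epilogue calls __stack_chk_fail() if it was clobbered:
 *
 *	void copy_name(const char *src)
 *	{
 *		char name[16];
 *
 *		strcpy(name, src);	// an overflow here trips the canary
 *	}
 */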

#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err)
{
	WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
		err, (void *)instruction_pointer(regs),
		current->comm, task_pid_nr(current),
		from_kuid_munged(&init_user_ns, current_uid()),
		from_kuid_munged(&init_user_ns, current_euid()));
}
#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
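
/*
 * Example boot command line exercising these knobs (values illustrative):
 *
 *	panic=30 panic_print=0x1f pause_on_oops=60 panic_on_warn=1 oops=panic
 *
 * i.e. reboot 30 seconds after a panic, dump tasks/mem/timers/locks/ftrace
 * on panic, stall parallel oopsing CPUs for 60 seconds, and promote WARN()s
 * and oopses to panics.  The same values appear writable under
 * /sys/module/kernel/parameters/.
 */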

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);