printk.c 81.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12
/*
 *  linux/kernel/printk.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
J
Jesper Juhl 已提交
13
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
14
 *     manfred@colorfullife.com
L
Linus Torvalds 已提交
15
 * Rewrote bits to get rid of console_lock
16
 *	01Mar01 Andrew Morton
L
Linus Torvalds 已提交
17 18 19 20 21 22 23 24
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
R
Randy Dunlap 已提交
25 26
#include <linux/jiffies.h>
#include <linux/nmi.h>
L
Linus Torvalds 已提交
27
#include <linux/module.h>
J
Jan Engelhardt 已提交
28
#include <linux/moduleparam.h>
L
Linus Torvalds 已提交
29 30 31 32
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/bootmem.h>
33
#include <linux/memblock.h>
L
Linus Torvalds 已提交
34
#include <linux/syscalls.h>
35
#include <linux/kexec.h>
36
#include <linux/kdb.h>
37
#include <linux/ratelimit.h>
38
#include <linux/kmsg_dump.h>
39
#include <linux/syslog.h>
40 41
#include <linux/cpu.h>
#include <linux/notifier.h>
42
#include <linux/rculist.h>
43
#include <linux/poll.h>
44
#include <linux/irq_work.h>
45
#include <linux/utsname.h>
A
Alex Elder 已提交
46
#include <linux/ctype.h>
47
#include <linux/uio.h>
L
Linus Torvalds 已提交
48 49

#include <asm/uaccess.h>
50
#include <asm/sections.h>
L
Linus Torvalds 已提交
51

52 53 54
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

J
Joe Perches 已提交
55
#include "console_cmdline.h"
56
#include "braille.h"
57
#include "internal.h"
J
Joe Perches 已提交
58

L
Linus Torvalds 已提交
59
int console_printk[4] = {
60
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
61
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
62 63
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
L
Linus Torvalds 已提交
64 65 66
};

/*
P
Patrick Pletscher 已提交
67
 * Low level drivers may need that to know if they can schedule in
L
Linus Torvalds 已提交
68 69 70 71 72 73 74 75 76 77
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_sem protects the console_drivers list, and also
 * provides serialisation for access to the entire console
 * driver system.
 */
78
static DEFINE_SEMAPHORE(console_sem);
L
Linus Torvalds 已提交
79
struct console *console_drivers;
I
Ingo Molnar 已提交
80 81
EXPORT_SYMBOL_GPL(console_drivers);

82 83 84 85 86 87
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};
#endif

88 89 90 91 92 93 94 95 96 97 98 99
/*
 * Number of registered extended console drivers.
 *
 * If extended consoles are present, in-kernel cont reassembly is disabled
 * and each fragment is stored as a separate log entry with proper
 * continuation flag so that every emitted message has full metadata.  This
 * doesn't change the result for regular consoles or /proc/kmsg.  For
 * /dev/kmsg, as long as the reader concatenates messages according to
 * consecutive continuation flags, the end result should be the same too.
 */
static int nr_ext_console_drivers;

100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
	if (down_trylock(&console_sem))
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

#define up_console_sem() do { \
	mutex_release(&console_lock_dep_map, 1, _RET_IP_);\
	up(&console_sem);\
} while (0)

L
Linus Torvalds 已提交
123 124 125 126
/*
 * This is used for debugging the mess that is the VT code by
 * keeping track if we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
A
Alex Elder 已提交
127 128 129
 * hold it and are racing, but it helps tracking those weird code
 * paths in the console code where we end up in places I want
 * locked without the console sempahore held).
L
Linus Torvalds 已提交
130
 */
131
static int console_locked, console_suspended;
L
Linus Torvalds 已提交
132

133 134 135 136 137
/*
 * If exclusive_console is non-NULL then only this console is to be printed to.
 */
static struct console *exclusive_console;

L
Linus Torvalds 已提交
138 139 140 141 142 143 144
/*
 *	Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
J
Joe Perches 已提交
145

L
Linus Torvalds 已提交
146 147
static int selected_console = -1;
static int preferred_console = -1;
148 149
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
L
Linus Torvalds 已提交
150 151 152 153

/* Flag: console code may call schedule() */
static int console_may_schedule;

154 155 156 157 158 159
/*
 * The printk log buffer consists of a chain of concatenated variable
 * length records. Every record starts with a record header, containing
 * the overall length of the record.
 *
 * The heads to the first and last entry in the buffer, as well as the
A
Alex Elder 已提交
160 161
 * sequence numbers of these entries are maintained when messages are
 * stored.
162 163 164 165 166 167 168 169 170 171 172 173 174 175 176
 *
 * If the heads indicate available messages, the length in the header
 * tells the start next message. A length == 0 for the next message
 * indicates a wrap-around to the beginning of the buffer.
 *
 * Every record carries the monotonic timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual
 * kernel messages use LOG_KERN; userspace-injected messages always carry
 * a matching syslog facility, by default LOG_USER. The origin of every
 * message can be reliably determined that way.
 *
 * The human readable log message directly follows the message header. The
 * length of the message text is stored in the header, the stored message
 * is not terminated.
 *
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205
 * Optionally, a message can carry a dictionary of properties (key/value pairs),
 * to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8               device identifier
 *                                b12:8         block dev_t
 *                                c127:3        char dev_t
 *                                n8            netdev ifindex
 *                                +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci              driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
 * follows directly after a '=' character. Every property is terminated by
 * a '\0' character. The last property is not terminated.
 *
 * Example of a message structure:
 *   0000  ff 8f 00 00 00 00 00 00      monotonic time in nsec
 *   0008  34 00                        record is 52 bytes long
 *   000a        0b 00                  text is 11 bytes long
 *   000c              1f 00            dictionary is 23 bytes long
 *   000e                    03 00      LOG_KERN (facility) LOG_ERR (level)
 *   0010  69 74 27 73 20 61 20 6c      "it's a l"
 *         69 6e 65                     "ine"
 *   001b           44 45 56 49 43      "DEVIC"
 *         45 3d 62 38 3a 32 00 44      "E=b8:2\0D"
 *         52 49 56 45 52 3d 62 75      "RIVER=bu"
 *         67                           "g"
 *   0032     00 00 00                  padding to next message header
 *
206
 * The 'struct printk_log' buffer header must never be directly exported to
207 208 209 210
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
211 212 213 214
 *   "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 *
 * Users of the export format should ignore possible additional values
 * separated by ',', and find the message after the ';' character.
215 216 217 218
 *
 * The optional key/value pairs are attached as continuation lines starting
 * with a space character and terminated by a newline. All possible
 * non-prinatable characters are escaped in the "\xff" notation.
219 220
 */

221
enum log_flags {
222 223 224 225
	LOG_NOCONS	= 1,	/* already flushed, do not print to console */
	LOG_NEWLINE	= 2,	/* text ended with a newline */
	LOG_PREFIX	= 4,	/* text started with a prefix */
	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
226 227
};

228
struct printk_log {
229 230 231 232
	u64 ts_nsec;		/* timestamp in nanoseconds */
	u16 len;		/* length of entire record */
	u16 text_len;		/* length of text buffer */
	u16 dict_len;		/* length of dictionary buffer */
233 234 235
	u8 facility;		/* syslog facility */
	u8 flags:5;		/* internal record flags */
	u8 level:3;		/* syslog level */
236 237 238 239 240
}
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
__packed __aligned(4)
#endif
;
241 242

/*
243 244 245
 * The logbuf_lock protects kmsg buffer, indices, counters.  This can be taken
 * within the scheduler's rq lock. It must be released before calling
 * console_unlock() or anything else that might wake up a process.
246
 */
247
DEFINE_RAW_SPINLOCK(logbuf_lock);
M
Matt Mackall 已提交
248

249
#ifdef CONFIG_PRINTK
250
DECLARE_WAIT_QUEUE_HEAD(log_wait);
251 252 253
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
254
static enum log_flags syslog_prev;
255
static size_t syslog_partial;
256 257 258 259 260 261 262 263 264

/* index and sequence number of the first record stored in the buffer */
static u64 log_first_seq;
static u32 log_first_idx;

/* index and sequence number of the next record to store in the buffer */
static u64 log_next_seq;
static u32 log_next_idx;

265 266 267 268 269
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
static enum log_flags console_prev;

270 271 272 273
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
static u32 clear_idx;

274
#define PREFIX_MAX		32
A
Alex Elder 已提交
275
#define LOG_LINE_MAX		(1024 - PREFIX_MAX)
276

277 278 279
#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)		((v) >> 3 & 0xff)

280
/* record buffer */
281
#define LOG_ALIGN __alignof__(struct printk_log)
282
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
283
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
284 285 286
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

287 288 289 290 291 292 293 294 295 296 297 298
/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

299
/* human readable text of the record */
300
static char *log_text(const struct printk_log *msg)
301
{
302
	return (char *)msg + sizeof(struct printk_log);
303 304 305
}

/* optional key/value pair dictionary attached to the record */
306
static char *log_dict(const struct printk_log *msg)
307
{
308
	return (char *)msg + sizeof(struct printk_log) + msg->text_len;
309 310 311
}

/* get record by index; idx must point to valid msg */
312
static struct printk_log *log_from_idx(u32 idx)
313
{
314
	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
315 316 317 318 319 320

	/*
	 * A length == 0 record is the end of buffer marker. Wrap around and
	 * read the message at the start of the buffer.
	 */
	if (!msg->len)
321
		return (struct printk_log *)log_buf;
322 323 324 325 326 327
	return msg;
}

/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
328
	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
329 330 331 332 333 334 335 336

	/* length == 0 indicates the end of the buffer; wrap */
	/*
	 * A length == 0 record is the end of buffer marker. Wrap around and
	 * read the message at the start of the buffer as *this* one, and
	 * return the one after that.
	 */
	if (!msg->len) {
337
		msg = (struct printk_log *)log_buf;
338 339 340 341 342
		return msg->len;
	}
	return idx + msg->len;
}

P
Petr Mladek 已提交
343 344 345 346 347 348 349 350 351 352
/*
 * Check whether there is enough free space for the given message.
 *
 * The same values of first_idx and next_idx mean that the buffer
 * is either empty or full.
 *
 * If the buffer is empty, we must respect the position of the indexes.
 * They cannot be reset to the beginning of the buffer.
 */
static int logbuf_has_space(u32 msg_size, bool empty)
353 354 355
{
	u32 free;

P
Petr Mladek 已提交
356
	if (log_next_idx > log_first_idx || empty)
357 358 359 360 361 362 363 364 365 366 367
		free = max(log_buf_len - log_next_idx, log_first_idx);
	else
		free = log_first_idx - log_next_idx;

	/*
	 * We need space also for an empty header that signalizes wrapping
	 * of the buffer.
	 */
	return free >= msg_size + sizeof(struct printk_log);
}

P
Petr Mladek 已提交
368
static int log_make_free_space(u32 msg_size)
369
{
370 371
	while (log_first_seq < log_next_seq &&
	       !logbuf_has_space(msg_size, false)) {
A
Alex Elder 已提交
372
		/* drop old messages until we have enough contiguous space */
373 374 375
		log_first_idx = log_next(log_first_idx);
		log_first_seq++;
	}
P
Petr Mladek 已提交
376

377 378 379 380 381
	if (clear_seq < log_first_seq) {
		clear_seq = log_first_seq;
		clear_idx = log_first_idx;
	}

P
Petr Mladek 已提交
382
	/* sequence numbers are equal, so the log buffer is empty */
383
	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
P
Petr Mladek 已提交
384 385 386
		return 0;

	return -ENOMEM;
387 388
}

389 390 391 392 393 394 395 396 397 398 399 400
/* compute the message size including the padding bytes */
static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
{
	u32 size;

	size = sizeof(struct printk_log) + text_len + dict_len;
	*pad_len = (-size) & (LOG_ALIGN - 1);
	size += *pad_len;

	return size;
}

P
Petr Mladek 已提交
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426
/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
			u16 *dict_len, u32 *pad_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
	if (*text_len > max_text_len)
		*text_len = max_text_len;
	/* enable the warning message */
	*trunc_msg_len = strlen(trunc_msg);
	/* disable the "dict" completely */
	*dict_len = 0;
	/* compute the size again, count also the warning message */
	return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
}

427
/* insert record into the buffer, discard old ones, update heads */
428 429 430 431
static int log_store(int facility, int level,
		     enum log_flags flags, u64 ts_nsec,
		     const char *dict, u16 dict_len,
		     const char *text, u16 text_len)
432
{
433
	struct printk_log *msg;
434
	u32 size, pad_len;
P
Petr Mladek 已提交
435
	u16 trunc_msg_len = 0;
436 437

	/* number of '\0' padding bytes to next message */
438
	size = msg_used_size(text_len, dict_len, &pad_len);
439

P
Petr Mladek 已提交
440 441 442 443 444 445
	if (log_make_free_space(size)) {
		/* truncate the message if it is too long for empty buffer */
		size = truncate_msg(&text_len, &trunc_msg_len,
				    &dict_len, &pad_len);
		/* survive when the log buffer is too small for trunc_msg */
		if (log_make_free_space(size))
446
			return 0;
P
Petr Mladek 已提交
447
	}
448

449
	if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
450 451 452 453 454
		/*
		 * This message + an additional empty header does not fit
		 * at the end of the buffer. Add an empty header with len == 0
		 * to signify a wrap around.
		 */
455
		memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
456 457 458 459
		log_next_idx = 0;
	}

	/* fill message */
460
	msg = (struct printk_log *)(log_buf + log_next_idx);
461 462
	memcpy(log_text(msg), text, text_len);
	msg->text_len = text_len;
P
Petr Mladek 已提交
463 464 465 466
	if (trunc_msg_len) {
		memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
		msg->text_len += trunc_msg_len;
	}
467 468
	memcpy(log_dict(msg), dict, dict_len);
	msg->dict_len = dict_len;
469 470 471 472 473 474 475
	msg->facility = facility;
	msg->level = level & 7;
	msg->flags = flags & 0x1f;
	if (ts_nsec > 0)
		msg->ts_nsec = ts_nsec;
	else
		msg->ts_nsec = local_clock();
476
	memset(log_dict(msg) + dict_len, 0, pad_len);
477
	msg->len = size;
478 479 480 481

	/* insert message */
	log_next_idx += msg->len;
	log_next_seq++;
482 483

	return msg->text_len;
484
}
M
Matt Mackall 已提交
485

A
Alex Elder 已提交
486
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
487 488 489 490 491 492 493 494 495 496 497 498 499

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	/*
	 * Unless restricted, we allow "read all" and "get buffer size"
	 * for everybody.
	 */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

500
int check_syslog_permissions(int type, int source)
501 502 503 504 505
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
506
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
507
		goto ok;
508 509 510

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
511
			goto ok;
512 513 514 515 516 517 518 519 520
		/*
		 * For historical reasons, accept CAP_SYS_ADMIN too, with
		 * a warning.
		 */
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				 current->comm, task_pid_nr(current));
521
			goto ok;
522 523 524
		}
		return -EPERM;
	}
525
ok:
526 527
	return security_syslog(type);
}
G
Geliang Tang 已提交
528
EXPORT_SYMBOL_GPL(check_syslog_permissions);
529

530 531 532 533 534
static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}
535

536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags)
{
	u64 ts_usec = msg->ts_nsec;
	char cont = '-';

	do_div(ts_usec, 1000);

	/*
	 * If we couldn't merge continuation line fragments during the print,
	 * export the stored flags to allow an optional external merge of the
	 * records. Merging the records isn't always neccessarily correct, like
	 * when we hit a race during printing. In most cases though, it produces
	 * better readable output. 'c' in the record flags mark the first
	 * fragment of a line, '+' the following.
	 */
	if (msg->flags & LOG_CONT && !(prev_flags & LOG_CONT))
		cont = 'c';
	else if ((msg->flags & LOG_CONT) ||
		 ((prev_flags & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
		cont = '+';

	return scnprintf(buf, size, "%u,%llu,%llu,%c;",
		       (msg->facility << 3) | msg->level, seq, ts_usec, cont);
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, '\n');

	if (dict_len) {
		bool line = true;

		for (i = 0; i < dict_len; i++) {
			unsigned char c = dict[i];

			if (line) {
				append_char(&p, e, ' ');
				line = false;
			}

			if (c == '\0') {
				append_char(&p, e, '\n');
				line = true;
				continue;
			}

			if (c < ' ' || c >= 127 || c == '\\') {
				p += scnprintf(p, e - p, "\\x%02x", c);
				continue;
			}

			append_char(&p, e, c);
		}
		append_char(&p, e, '\n');
	}

	return p - buf;
}

611 612 613 614
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	u64 seq;
	u32 idx;
615
	enum log_flags prev;
616
	struct mutex lock;
617
	char buf[CONSOLE_EXT_LOG_MAX];
618 619
};

A
Al Viro 已提交
620
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
621 622 623 624
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
C
Christoph Hellwig 已提交
625
	size_t len = iov_iter_count(from);
626 627 628 629 630 631 632 633
	ssize_t ret = len;

	if (len > LOG_LINE_MAX)
		return -EINVAL;
	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

A
Al Viro 已提交
634 635 636 637
	buf[len] = '\0';
	if (copy_from_iter(buf, len, from) != len) {
		kfree(buf);
		return -EFAULT;
638 639 640 641 642 643 644 645 646 647 648 649 650 651
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents 32bit, the lower 3 bit are the log
	 * level, the rest are the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
652
		unsigned int u;
653

654
		u = simple_strtoul(line + 1, &endp, 10);
655
		if (endp && endp[0] == '>') {
656 657 658
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
659 660 661 662 663 664 665 666 667 668 669 670 671 672 673
			endp++;
			len -= endp - line;
			line = endp;
		}
	}

	printk_emit(facility, level, NULL, 0, "%s", line);
	kfree(buf);
	return ret;
}

static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
674
	struct printk_log *msg;
675 676 677 678 679 680
	size_t len;
	ssize_t ret;

	if (!user)
		return -EBADF;

681 682 683
	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;
684
	raw_spin_lock_irq(&logbuf_lock);
685 686 687
	while (user->seq == log_next_seq) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
688
			raw_spin_unlock_irq(&logbuf_lock);
689 690 691
			goto out;
		}

692
		raw_spin_unlock_irq(&logbuf_lock);
693 694 695 696
		ret = wait_event_interruptible(log_wait,
					       user->seq != log_next_seq);
		if (ret)
			goto out;
697
		raw_spin_lock_irq(&logbuf_lock);
698 699 700 701 702 703 704
	}

	if (user->seq < log_first_seq) {
		/* our last seen message is gone, return error and reset */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		ret = -EPIPE;
705
		raw_spin_unlock_irq(&logbuf_lock);
706 707 708 709
		goto out;
	}

	msg = log_from_idx(user->idx);
710 711 712 713 714
	len = msg_print_ext_header(user->buf, sizeof(user->buf),
				   msg, user->seq, user->prev);
	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
				  log_dict(msg), msg->dict_len,
				  log_text(msg), msg->text_len);
715 716

	user->prev = msg->flags;
717 718
	user->idx = log_next(user->idx);
	user->seq++;
719
	raw_spin_unlock_irq(&logbuf_lock);
720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745

	if (len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, user->buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = len;
out:
	mutex_unlock(&user->lock);
	return ret;
}

static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (!user)
		return -EBADF;
	if (offset)
		return -ESPIPE;

746
	raw_spin_lock_irq(&logbuf_lock);
747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769
	switch (whence) {
	case SEEK_SET:
		/* the first record */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		user->idx = clear_idx;
		user->seq = clear_seq;
		break;
	case SEEK_END:
		/* after the last record */
		user->idx = log_next_idx;
		user->seq = log_next_seq;
		break;
	default:
		ret = -EINVAL;
	}
770
	raw_spin_unlock_irq(&logbuf_lock);
771 772 773 774 775 776 777 778 779 780 781 782 783
	return ret;
}

static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	int ret = 0;

	if (!user)
		return POLLERR|POLLNVAL;

	poll_wait(file, &log_wait, wait);

784
	raw_spin_lock_irq(&logbuf_lock);
785 786 787 788
	if (user->seq < log_next_seq) {
		/* return error when data has vanished underneath us */
		if (user->seq < log_first_seq)
			ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
789 790
		else
			ret = POLLIN|POLLRDNORM;
791
	}
792
	raw_spin_unlock_irq(&logbuf_lock);
793 794 795 796 797 798 799 800 801 802 803 804 805

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		return 0;

806 807
	err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
				       SYSLOG_FROM_READER);
808 809 810 811 812 813 814 815 816
	if (err)
		return err;

	user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	mutex_init(&user->lock);

817
	raw_spin_lock_irq(&logbuf_lock);
818 819
	user->idx = log_first_idx;
	user->seq = log_first_seq;
820
	raw_spin_unlock_irq(&logbuf_lock);
821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	if (!user)
		return 0;

	mutex_destroy(&user->lock);
	kfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
A
Al Viro 已提交
841
	.write_iter = devkmsg_write,
842 843 844 845 846
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

847
#ifdef CONFIG_KEXEC_CORE
848
/*
849
 * This appends the listed symbols to /proc/vmcore
850
 *
851
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
852 853 854 855 856 857 858 859
 * obtain access to symbols that are otherwise very difficult to locate.  These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_kexec_setup(void)
{
	VMCOREINFO_SYMBOL(log_buf);
	VMCOREINFO_SYMBOL(log_buf_len);
860
	VMCOREINFO_SYMBOL(log_first_idx);
861
	VMCOREINFO_SYMBOL(clear_idx);
862
	VMCOREINFO_SYMBOL(log_next_idx);
863
	/*
864
	 * Export struct printk_log size and field offsets. User space tools can
865 866
	 * parse it and detect any changes to structure down the line.
	 */
867 868 869 870 871
	VMCOREINFO_STRUCT_SIZE(printk_log);
	VMCOREINFO_OFFSET(printk_log, ts_nsec);
	VMCOREINFO_OFFSET(printk_log, len);
	VMCOREINFO_OFFSET(printk_log, text_len);
	VMCOREINFO_OFFSET(printk_log, dict_len);
872 873 874
}
#endif

875 876 877
/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

878 879
/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(unsigned size)
L
Linus Torvalds 已提交
880 881 882
{
	if (size)
		size = roundup_pow_of_two(size);
883 884
	if (size > log_buf_len)
		new_log_buf_len = size;
885 886 887 888 889 890 891 892
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
	unsigned size = memparse(str, &str);

	log_buf_len_update(size);
893 894

	return 0;
L
Linus Torvalds 已提交
895
}
896 897
early_param("log_buf_len", log_buf_len_setup);

898 899 900
#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case lets ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large > 64 CPUs */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
927 928 929
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
930

931 932 933 934 935 936
void __init setup_log_buf(int early)
{
	unsigned long flags;
	char *new_log_buf;
	int free;

937 938 939 940 941 942
	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

943 944
	if (!new_log_buf_len)
		return;
L
Linus Torvalds 已提交
945

946
	if (early) {
947
		new_log_buf =
948
			memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
949
	} else {
950 951
		new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
							  LOG_ALIGN);
952 953 954 955 956 957 958 959
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

960
	raw_spin_lock_irqsave(&logbuf_lock, flags);
961 962 963
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
964 965
	free = __LOG_BUF_LEN - log_next_idx;
	memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
966
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
967

968
	pr_info("log_buf_len: %d bytes\n", log_buf_len);
969 970 971
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
L
Linus Torvalds 已提交
972

973 974 975 976
static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
977
	ignore_loglevel = true;
978
	pr_info("debug: ignoring loglevel setting.\n");
979 980 981 982 983 984

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
985 986
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");
987

988 989 990 991 992
static bool suppress_message_printing(int level)
{
	return (level >= console_loglevel && !ignore_loglevel);
}

R
Randy Dunlap 已提交
993 994
#ifdef CONFIG_BOOT_PRINTK_DELAY

995
static int boot_delay; /* msecs delay after each printk during bootup */
996
static unsigned long long loops_per_msec;	/* based on boot_delay */
R
Randy Dunlap 已提交
997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008

static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

1009 1010 1011
	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		"HZ: %d, loops_per_msec: %llu\n",
		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1012
	return 0;
R
Randy Dunlap 已提交
1013
}
1014
early_param("boot_delay", boot_delay_setup);
R
Randy Dunlap 已提交
1015

1016
static void boot_delay_msec(int level)
R
Randy Dunlap 已提交
1017 1018 1019 1020
{
	unsigned long long k;
	unsigned long timeout;

1021
	if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
1022
		|| suppress_message_printing(level)) {
R
Randy Dunlap 已提交
1023
		return;
1024
	}
R
Randy Dunlap 已提交
1025

1026
	k = (unsigned long long)loops_per_msec * boot_delay;
R
Randy Dunlap 已提交
1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
1043
static inline void boot_delay_msec(int level)
R
Randy Dunlap 已提交
1044 1045 1046 1047
{
}
#endif

A
Alex Elder 已提交
1048
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1049 1050
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

1051 1052 1053 1054 1055 1056 1057
static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec;

	if (!printk_time)
		return 0;

1058 1059
	rem_nsec = do_div(ts, 1000000000);

1060
	if (!buf)
1061
		return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
1062 1063 1064 1065 1066

	return sprintf(buf, "[%5lu.%06lu] ",
		       (unsigned long)ts, rem_nsec / 1000);
}

1067
static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
1068
{
1069
	size_t len = 0;
1070
	unsigned int prefix = (msg->facility << 3) | msg->level;
1071

1072 1073
	if (syslog) {
		if (buf) {
1074
			len += sprintf(buf, "<%u>", prefix);
1075 1076
		} else {
			len += 3;
1077 1078 1079 1080 1081
			if (prefix > 999)
				len += 3;
			else if (prefix > 99)
				len += 2;
			else if (prefix > 9)
1082 1083 1084
				len++;
		}
	}
1085

1086
	len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
1087
	return len;
1088 1089
}

1090
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
1091
			     bool syslog, char *buf, size_t size)
1092
{
1093 1094
	const char *text = log_text(msg);
	size_t text_size = msg->text_len;
1095 1096
	bool prefix = true;
	bool newline = true;
1097 1098
	size_t len = 0;

1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109
	if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
		prefix = false;

	if (msg->flags & LOG_CONT) {
		if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
			prefix = false;

		if (!(msg->flags & LOG_NEWLINE))
			newline = false;
	}

1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120
	do {
		const char *next = memchr(text, '\n', text_size);
		size_t text_len;

		if (next) {
			text_len = next - text;
			next++;
			text_size -= next - text;
		} else {
			text_len = text_size;
		}
1121

1122 1123
		if (buf) {
			if (print_prefix(msg, syslog, NULL) +
1124
			    text_len + 1 >= size - len)
1125
				break;
1126

1127 1128
			if (prefix)
				len += print_prefix(msg, syslog, buf + len);
1129 1130
			memcpy(buf + len, text, text_len);
			len += text_len;
1131 1132
			if (next || newline)
				buf[len++] = '\n';
1133 1134
		} else {
			/* SYSLOG_ACTION_* buffer size only calculation */
1135 1136 1137 1138 1139
			if (prefix)
				len += print_prefix(msg, syslog, NULL);
			len += text_len;
			if (next || newline)
				len++;
1140
		}
1141

1142
		prefix = true;
1143 1144
		text = next;
	} while (text);
1145 1146 1147 1148 1149 1150 1151

	return len;
}

static int syslog_print(char __user *buf, int size)
{
	char *text;
1152
	struct printk_log *msg;
1153
	int len = 0;
1154

1155
	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
1156 1157 1158
	if (!text)
		return -ENOMEM;

1159 1160
	while (size > 0) {
		size_t n;
1161
		size_t skip;
1162 1163 1164 1165 1166 1167

		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
1168
			syslog_prev = 0;
1169
			syslog_partial = 0;
1170 1171 1172 1173 1174
		}
		if (syslog_seq == log_next_seq) {
			raw_spin_unlock_irq(&logbuf_lock);
			break;
		}
1175 1176

		skip = syslog_partial;
1177
		msg = log_from_idx(syslog_idx);
1178 1179
		n = msg_print_text(msg, syslog_prev, true, text,
				   LOG_LINE_MAX + PREFIX_MAX);
1180 1181
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
1182 1183
			syslog_idx = log_next(syslog_idx);
			syslog_seq++;
1184
			syslog_prev = msg->flags;
1185 1186 1187 1188 1189 1190
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len){
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
1191 1192 1193 1194 1195 1196 1197
		} else
			n = 0;
		raw_spin_unlock_irq(&logbuf_lock);

		if (!n)
			break;

1198
		if (copy_to_user(buf, text + skip, n)) {
1199 1200 1201 1202
			if (!len)
				len = -EFAULT;
			break;
		}
1203 1204 1205 1206

		len += n;
		size -= n;
		buf += n;
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
	}

	kfree(text);
	return len;
}

static int syslog_print_all(char __user *buf, int size, bool clear)
{
	char *text;
	int len = 0;

1218
	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
1219 1220 1221 1222 1223 1224 1225 1226
	if (!text)
		return -ENOMEM;

	raw_spin_lock_irq(&logbuf_lock);
	if (buf) {
		u64 next_seq;
		u64 seq;
		u32 idx;
1227
		enum log_flags prev;
1228 1229 1230 1231

		/*
		 * Find first record that fits, including all following records,
		 * into the user-provided buffer for this dump.
1232
		 */
1233 1234
		seq = clear_seq;
		idx = clear_idx;
1235
		prev = 0;
1236
		while (seq < log_next_seq) {
1237
			struct printk_log *msg = log_from_idx(idx);
1238

1239
			len += msg_print_text(msg, prev, true, NULL, 0);
1240
			prev = msg->flags;
1241 1242 1243
			idx = log_next(idx);
			seq++;
		}
1244 1245

		/* move first record forward until length fits into the buffer */
1246 1247
		seq = clear_seq;
		idx = clear_idx;
1248
		prev = 0;
1249
		while (len > size && seq < log_next_seq) {
1250
			struct printk_log *msg = log_from_idx(idx);
1251

1252
			len -= msg_print_text(msg, prev, true, NULL, 0);
1253
			prev = msg->flags;
1254 1255 1256 1257
			idx = log_next(idx);
			seq++;
		}

1258
		/* last message fitting into this dump */
1259 1260 1261 1262
		next_seq = log_next_seq;

		len = 0;
		while (len >= 0 && seq < next_seq) {
1263
			struct printk_log *msg = log_from_idx(idx);
1264 1265
			int textlen;

1266 1267
			textlen = msg_print_text(msg, prev, true, text,
						 LOG_LINE_MAX + PREFIX_MAX);
1268 1269 1270 1271 1272 1273
			if (textlen < 0) {
				len = textlen;
				break;
			}
			idx = log_next(idx);
			seq++;
1274
			prev = msg->flags;
1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286

			raw_spin_unlock_irq(&logbuf_lock);
			if (copy_to_user(buf + len, text, textlen))
				len = -EFAULT;
			else
				len += textlen;
			raw_spin_lock_irq(&logbuf_lock);

			if (seq < log_first_seq) {
				/* messages are gone, move to next one */
				seq = log_first_seq;
				idx = log_first_idx;
1287
				prev = 0;
1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
			}
		}
	}

	if (clear) {
		clear_seq = log_next_seq;
		clear_idx = log_next_idx;
	}
	raw_spin_unlock_irq(&logbuf_lock);

	kfree(text);
	return len;
}

1302
int do_syslog(int type, char __user *buf, int len, int source)
L
Linus Torvalds 已提交
1303
{
1304
	bool clear = false;
1305
	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1306
	int error;
L
Linus Torvalds 已提交
1307

1308
	error = check_syslog_permissions(type, source);
1309 1310
	if (error)
		goto out;
1311

L
Linus Torvalds 已提交
1312
	switch (type) {
1313
	case SYSLOG_ACTION_CLOSE:	/* Close log */
L
Linus Torvalds 已提交
1314
		break;
1315
	case SYSLOG_ACTION_OPEN:	/* Open log */
L
Linus Torvalds 已提交
1316
		break;
1317
	case SYSLOG_ACTION_READ:	/* Read from log */
L
Linus Torvalds 已提交
1318 1319 1320 1321 1322 1323 1324 1325 1326 1327
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
J
Jesper Juhl 已提交
1328
		error = wait_event_interruptible(log_wait,
1329
						 syslog_seq != log_next_seq);
1330
		if (error)
L
Linus Torvalds 已提交
1331
			goto out;
1332
		error = syslog_print(buf, len);
L
Linus Torvalds 已提交
1333
		break;
1334 1335
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
1336
		clear = true;
L
Linus Torvalds 已提交
1337
		/* FALL THRU */
1338 1339
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
L
Linus Torvalds 已提交
1340 1341 1342 1343 1344 1345 1346 1347 1348 1349
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
1350
		error = syslog_print_all(buf, len, clear);
L
Linus Torvalds 已提交
1351
		break;
1352 1353
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
1354
		syslog_print_all(NULL, 0, true);
1355
		break;
1356 1357
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
1358
		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1359
			saved_console_loglevel = console_loglevel;
L
Linus Torvalds 已提交
1360 1361
		console_loglevel = minimum_console_loglevel;
		break;
1362 1363
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
1364
		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1365
			console_loglevel = saved_console_loglevel;
1366
			saved_console_loglevel = LOGLEVEL_DEFAULT;
1367
		}
L
Linus Torvalds 已提交
1368
		break;
1369 1370
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
L
Linus Torvalds 已提交
1371 1372 1373 1374 1375 1376
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
1377
		/* Implicitly re-enable logging to console */
1378
		saved_console_loglevel = LOGLEVEL_DEFAULT;
L
Linus Torvalds 已提交
1379 1380
		error = 0;
		break;
1381 1382
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
1383 1384 1385 1386 1387
		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
1388
			syslog_prev = 0;
1389
			syslog_partial = 0;
1390
		}
1391
		if (source == SYSLOG_FROM_PROC) {
1392 1393 1394 1395 1396
			/*
			 * Short-cut for poll(/"proc/kmsg") which simply checks
			 * for pending data, not the size; return the count of
			 * records, not the length.
			 */
1397
			error = log_next_seq - syslog_seq;
1398
		} else {
1399 1400 1401
			u64 seq = syslog_seq;
			u32 idx = syslog_idx;
			enum log_flags prev = syslog_prev;
1402 1403 1404

			error = 0;
			while (seq < log_next_seq) {
1405
				struct printk_log *msg = log_from_idx(idx);
1406

1407
				error += msg_print_text(msg, prev, true, NULL, 0);
1408 1409
				idx = log_next(idx);
				seq++;
1410
				prev = msg->flags;
1411
			}
1412
			error -= syslog_partial;
1413 1414
		}
		raw_spin_unlock_irq(&logbuf_lock);
L
Linus Torvalds 已提交
1415
		break;
1416 1417
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
L
Linus Torvalds 已提交
1418 1419 1420 1421 1422 1423 1424 1425 1426 1427
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}

1428
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
L
Linus Torvalds 已提交
1429
{
1430
	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
L
Linus Torvalds 已提交
1431 1432 1433 1434 1435
}

/*
 * Call the console drivers, asking them to write out
 * log_buf[start] to log_buf[end - 1].
1436
 * The console_lock must be held.
L
Linus Torvalds 已提交
1437
 */
1438 1439 1440
static void call_console_drivers(int level,
				 const char *ext_text, size_t ext_len,
				 const char *text, size_t len)
L
Linus Torvalds 已提交
1441
{
1442
	struct console *con;
L
Linus Torvalds 已提交
1443

1444
	trace_console(text, len);
1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458

	if (!console_drivers)
		return;

	for_each_console(con) {
		if (exclusive_console && con != exclusive_console)
			continue;
		if (!(con->flags & CON_ENABLED))
			continue;
		if (!con->write)
			continue;
		if (!cpu_online(smp_processor_id()) &&
		    !(con->flags & CON_ANYTIME))
			continue;
1459 1460 1461 1462
		if (con->flags & CON_EXTENDED)
			con->write(con, ext_text, ext_len);
		else
			con->write(con, text, len);
1463
	}
L
Linus Torvalds 已提交
1464 1465 1466
}

/*
1467 1468 1469
 * Zap console related locks when oopsing.
 * To leave time for slow consoles to print a full oops,
 * only zap at most once every 30 seconds.
L
Linus Torvalds 已提交
1470 1471 1472 1473 1474 1475
 */
static void zap_locks(void)
{
	static unsigned long oops_timestamp;

	if (time_after_eq(jiffies, oops_timestamp) &&
1476
	    !time_after(jiffies, oops_timestamp + 30 * HZ))
L
Linus Torvalds 已提交
1477 1478 1479 1480
		return;

	oops_timestamp = jiffies;

1481
	debug_locks_off();
L
Linus Torvalds 已提交
1482
	/* If a crash is occurring, make sure we can't deadlock */
1483
	raw_spin_lock_init(&logbuf_lock);
L
Linus Torvalds 已提交
1484
	/* And make sure that we print immediately */
1485
	sema_init(&console_sem, 1);
L
Linus Torvalds 已提交
1486 1487
}

1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
int printk_delay_msec __read_mostly;

static inline void printk_delay(void)
{
	if (unlikely(printk_delay_msec)) {
		int m = printk_delay_msec;

		while (m--) {
			mdelay(1);
			touch_nmi_watchdog();
		}
	}
}

1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
/*
 * Continuation lines are buffered, and not committed to the record buffer
 * until the line is complete, or a race forces it. The line fragments
 * though, are printed immediately to the consoles to ensure everything has
 * reached the console in case of a kernel crash.
 */
static struct cont {
	char buf[LOG_LINE_MAX];
	size_t len;			/* length == 0 means unused buffer */
	size_t cons;			/* bytes written to console */
	struct task_struct *owner;	/* task of first print*/
	u64 ts_nsec;			/* time of first print */
	u8 level;			/* log level of first message */
A
Alex Elder 已提交
1515
	u8 facility;			/* log facility of first message */
1516
	enum log_flags flags;		/* prefix, newline flags */
1517 1518 1519
	bool flushed:1;			/* buffer sealed and committed */
} cont;

1520
static void cont_flush(enum log_flags flags)
1521 1522 1523 1524 1525 1526
{
	if (cont.flushed)
		return;
	if (cont.len == 0)
		return;

1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545
	if (cont.cons) {
		/*
		 * If a fragment of this line was directly flushed to the
		 * console; wait for the console to pick up the rest of the
		 * line. LOG_NOCONS suppresses a duplicated output.
		 */
		log_store(cont.facility, cont.level, flags | LOG_NOCONS,
			  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
		cont.flags = flags;
		cont.flushed = true;
	} else {
		/*
		 * If no fragment of this line ever reached the console,
		 * just submit it to the store and free the buffer.
		 */
		log_store(cont.facility, cont.level, flags, 0,
			  NULL, 0, cont.buf, cont.len);
		cont.len = 0;
	}
1546 1547 1548 1549 1550 1551 1552
}

static bool cont_add(int facility, int level, const char *text, size_t len)
{
	if (cont.len && cont.flushed)
		return false;

1553 1554 1555 1556 1557 1558
	/*
	 * If ext consoles are present, flush and skip in-kernel
	 * continuation.  See nr_ext_console_drivers definition.  Also, if
	 * the line gets too long, split it up in separate records.
	 */
	if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
1559
		cont_flush(LOG_CONT);
1560 1561 1562 1563 1564 1565 1566 1567
		return false;
	}

	if (!cont.len) {
		cont.facility = facility;
		cont.level = level;
		cont.owner = current;
		cont.ts_nsec = local_clock();
1568
		cont.flags = 0;
1569 1570 1571 1572 1573 1574
		cont.cons = 0;
		cont.flushed = false;
	}

	memcpy(cont.buf + cont.len, text, len);
	cont.len += len;
1575 1576 1577 1578

	if (cont.len > (sizeof(cont.buf) * 80) / 100)
		cont_flush(LOG_CONT);

1579 1580 1581 1582 1583 1584 1585 1586
	return true;
}

static size_t cont_print_text(char *text, size_t size)
{
	size_t textlen = 0;
	size_t len;

1587
	if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601
		textlen += print_time(cont.ts_nsec, text);
		size -= textlen;
	}

	len = cont.len - cont.cons;
	if (len > 0) {
		if (len+1 > size)
			len = size-1;
		memcpy(text + textlen, cont.buf + cont.cons, len);
		textlen += len;
		cont.cons = cont.len;
	}

	if (cont.flushed) {
1602 1603
		if (cont.flags & LOG_NEWLINE)
			text[textlen++] = '\n';
1604 1605 1606 1607 1608 1609
		/* got everything, release buffer */
		cont.len = 0;
	}
	return textlen;
}

1610 1611 1612
asmlinkage int vprintk_emit(int facility, int level,
			    const char *dict, size_t dictlen,
			    const char *fmt, va_list args)
L
Linus Torvalds 已提交
1613
{
1614
	static bool recursion_bug;
1615 1616
	static char textbuf[LOG_LINE_MAX];
	char *text = textbuf;
1617
	size_t text_len = 0;
1618
	enum log_flags lflags = 0;
1619
	unsigned long flags;
1620
	int this_cpu;
1621
	int printed_len = 0;
1622
	int nmi_message_lost;
1623
	bool in_sched = false;
1624
	/* cpu currently holding logbuf_lock in this function */
1625
	static unsigned int logbuf_cpu = UINT_MAX;
1626

1627 1628
	if (level == LOGLEVEL_SCHED) {
		level = LOGLEVEL_DEFAULT;
1629 1630
		in_sched = true;
	}
L
Linus Torvalds 已提交
1631

1632
	boot_delay_msec(level);
1633
	printk_delay();
R
Randy Dunlap 已提交
1634

1635
	local_irq_save(flags);
1636 1637 1638 1639 1640
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
1641
	if (unlikely(logbuf_cpu == this_cpu)) {
1642 1643 1644 1645 1646 1647 1648
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
1649
		if (!oops_in_progress && !lockdep_recursing(current)) {
1650
			recursion_bug = true;
1651 1652
			local_irq_restore(flags);
			return 0;
1653 1654 1655 1656
		}
		zap_locks();
	}

1657
	lockdep_off();
1658
	/* This stops the holder of console_sem just where we want him */
1659
	raw_spin_lock(&logbuf_lock);
1660
	logbuf_cpu = this_cpu;
L
Linus Torvalds 已提交
1661

1662
	if (unlikely(recursion_bug)) {
1663 1664 1665
		static const char recursion_msg[] =
			"BUG: recent printk recursion!";

1666
		recursion_bug = false;
1667
		/* emit KERN_CRIT message */
1668
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
1669 1670
					 NULL, 0, recursion_msg,
					 strlen(recursion_msg));
1671
	}
L
Linus Torvalds 已提交
1672

1673 1674 1675 1676 1677 1678 1679 1680 1681
	nmi_message_lost = get_nmi_message_lost();
	if (unlikely(nmi_message_lost)) {
		text_len = scnprintf(textbuf, sizeof(textbuf),
				     "BAD LUCK: lost %d message(s) from NMI context!",
				     nmi_message_lost);
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
					 NULL, 0, textbuf, text_len);
	}

1682 1683 1684 1685
	/*
	 * The printf needs to come first; we need the syslog
	 * prefix which might be passed-in as a parameter.
	 */
1686
	text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
1687

1688
	/* mark and strip a trailing newline */
1689 1690
	if (text_len && text[text_len-1] == '\n') {
		text_len--;
1691
		lflags |= LOG_NEWLINE;
1692
	}
1693

1694 1695 1696 1697 1698 1699 1700 1701
	/* strip kernel syslog prefix and extract log level or control flags */
	if (facility == 0) {
		int kern_level = printk_get_level(text);

		if (kern_level) {
			const char *end_of_header = printk_skip_level(text);
			switch (kern_level) {
			case '0' ... '7':
1702
				if (level == LOGLEVEL_DEFAULT)
1703
					level = kern_level - '0';
1704
				/* fallthrough */
1705 1706 1707
			case 'd':	/* KERN_DEFAULT */
				lflags |= LOG_PREFIX;
			}
1708 1709 1710 1711 1712
			/*
			 * No need to check length here because vscnprintf
			 * put '\0' at the end of the string. Only valid and
			 * newly printed level is detected.
			 */
1713 1714
			text_len -= end_of_header - text;
			text = (char *)end_of_header;
1715 1716 1717
		}
	}

1718
	if (level == LOGLEVEL_DEFAULT)
1719
		level = default_message_loglevel;
1720

1721 1722
	if (dict)
		lflags |= LOG_PREFIX|LOG_NEWLINE;
1723

1724
	if (!(lflags & LOG_NEWLINE)) {
1725 1726 1727 1728
		/*
		 * Flush the conflicting buffer. An earlier newline was missing,
		 * or another task also prints continuation lines.
		 */
1729
		if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
1730
			cont_flush(LOG_NEWLINE);
1731

1732
		/* buffer line if possible, otherwise store it right away */
1733 1734 1735 1736 1737 1738
		if (cont_add(facility, level, text, text_len))
			printed_len += text_len;
		else
			printed_len += log_store(facility, level,
						 lflags | LOG_CONT, 0,
						 dict, dictlen, text, text_len);
1739
	} else {
1740
		bool stored = false;
1741

1742
		/*
1743 1744 1745 1746
		 * If an earlier newline was missing and it was the same task,
		 * either merge it with the current buffer and flush, or if
		 * there was a race with interrupts (prefix == true) then just
		 * flush it out and store this line separately.
1747 1748
		 * If the preceding printk was from a different task and missed
		 * a newline, flush and append the newline.
1749
		 */
1750 1751 1752 1753
		if (cont.len) {
			if (cont.owner == current && !(lflags & LOG_PREFIX))
				stored = cont_add(facility, level, text,
						  text_len);
1754
			cont_flush(LOG_NEWLINE);
1755
		}
1756

1757 1758 1759 1760 1761
		if (stored)
			printed_len += text_len;
		else
			printed_len += log_store(facility, level, lflags, 0,
						 dict, dictlen, text, text_len);
L
Linus Torvalds 已提交
1762 1763
	}

1764 1765
	logbuf_cpu = UINT_MAX;
	raw_spin_unlock(&logbuf_lock);
1766 1767
	lockdep_on();
	local_irq_restore(flags);
1768

1769
	/* If called from the scheduler, we can not call up(). */
1770
	if (!in_sched) {
1771
		lockdep_off();
1772 1773 1774 1775 1776
		/*
		 * Try to acquire and then immediately release the console
		 * semaphore.  The release will print out buffers and wake up
		 * /dev/kmsg and syslog() users.
		 */
1777
		if (console_trylock())
1778
			console_unlock();
1779
		lockdep_on();
1780
	}
1781

L
Linus Torvalds 已提交
1782 1783
	return printed_len;
}
1784 1785 1786 1787
EXPORT_SYMBOL(vprintk_emit);

asmlinkage int vprintk(const char *fmt, va_list args)
{
1788
	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
1789
}
L
Linus Torvalds 已提交
1790 1791
EXPORT_SYMBOL(vprintk);

1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806
asmlinkage int printk_emit(int facility, int level,
			   const char *dict, size_t dictlen,
			   const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(printk_emit);

1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828
#ifdef CONFIG_PRINTK
#define define_pr_level(func, loglevel)				\
asmlinkage __visible void func(const char *fmt, ...)		\
{								\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vprintk_default(loglevel, fmt, args);			\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func)

define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
define_pr_level(__pr_alert, LOGLEVEL_ALERT);
define_pr_level(__pr_crit, LOGLEVEL_CRIT);
define_pr_level(__pr_err, LOGLEVEL_ERR);
define_pr_level(__pr_warn, LOGLEVEL_WARNING);
define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
define_pr_level(__pr_info, LOGLEVEL_INFO);
#endif

int vprintk_default(int level, const char *fmt, va_list args)
{
	int r;

#ifdef CONFIG_KGDB_KDB
	if (unlikely(kdb_trap_printk)) {
		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
		return r;
	}
#endif
	r = vprintk_emit(0, level, NULL, 0, fmt, args);

	return r;
}
EXPORT_SYMBOL_GPL(vprintk_default);

/**
 * printk - print a kernel message
 * @fmt: format string
 *
 * This is printk(). It can be called from any context. We want it to work.
 *
 * We try to grab the console_lock. If we succeed, it's easy - we log the
 * output and call the console drivers.  If we fail to get the semaphore, we
 * place the output into the log buffer and return. The current holder of
 * the console_sem will notice the new output in console_unlock(); and will
 * send it to the consoles before releasing the lock.
 *
 * One effect of this deferred printing is that code which calls printk() and
 * then changes console_loglevel may break. This is because console_loglevel
 * is inspected when the actual printing occurs.
 *
 * See also:
 * printf(3)
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
asmlinkage __visible int printk(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(printk);
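/*
 * Illustrative usage sketch: callers pass a KERN_<LEVEL> prefix or rely on
 * default_message_loglevel, e.g.
 *
 *	printk(KERN_WARNING "%s: device %d not responding\n", __func__, id);
 *	pr_info("boot finished in %lu ms\n", elapsed_ms);
 *
 * "id" and "elapsed_ms" are hypothetical variables used only for the example.
 */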

#else /* CONFIG_PRINTK */

#define LOG_LINE_MAX		0
#define PREFIX_MAX		0

static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
static enum log_flags syslog_prev;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
static enum log_flags console_prev;
static struct cont {
	size_t len;
	size_t cons;
	u8 level;
	bool flushed:1;
} cont;
static char *log_text(const struct printk_log *msg) { return NULL; }
static char *log_dict(const struct printk_log *msg) { return NULL; }
static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags) { return 0; }
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len) { return 0; }
static void call_console_drivers(int level,
				 const char *ext_text, size_t ext_len,
				 const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }

/* Still needs to be defined for users */
DEFINE_PER_CPU(printk_func_t, printk_func);

#endif /* CONFIG_PRINTK */

#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;

asmlinkage __visible void early_printk(const char *fmt, ...)
{
	va_list ap;
	char buf[512];
	int n;

	if (!early_console)
		return;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	early_console->write(early_console, buf, n);
}
#endif

static int __add_preferred_console(char *name, int idx, char *options,
				   char *brl_options)
{
	struct console_cmdline *c;
	int i;

	/*
	 *	See if this tty is not yet registered, and
	 *	if we have a slot free.
	 */
	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && c->name[0];
	     i++, c++) {
		if (strcmp(c->name, name) == 0 && c->index == idx) {
			if (!brl_options)
				selected_console = i;
			return 0;
		}
	}
	if (i == MAX_CMDLINECONSOLES)
		return -E2BIG;
	if (!brl_options)
		selected_console = i;
	strlcpy(c->name, name, sizeof(c->name));
	c->options = options;
	braille_set_options(c, brl_options);

	c->index = idx;
	return 0;
}
/*
 * Set up a console.  Called via do_early_param() in init/main.c
 * for each "console=" parameter in the boot command line.
 */
static int __init console_setup(char *str)
{
	char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
	char *s, *options, *brl_options = NULL;
	int idx;

	if (_braille_console_setup(&str, &brl_options))
		return 1;

	/*
	 * Decode str into name, index, options.
	 */
	if (str[0] >= '0' && str[0] <= '9') {
		strcpy(buf, "ttyS");
		strncpy(buf + 4, str, sizeof(buf) - 5);
	} else {
		strncpy(buf, str, sizeof(buf) - 1);
	}
	buf[sizeof(buf) - 1] = 0;
	options = strchr(str, ',');
	if (options)
		*(options++) = 0;
#ifdef __sparc__
	if (!strcmp(str, "ttya"))
		strcpy(buf, "ttyS0");
	if (!strcmp(str, "ttyb"))
		strcpy(buf, "ttyS1");
#endif
	for (s = buf; *s; s++)
		if (isdigit(*s) || *s == ',')
			break;
	idx = simple_strtoul(s, NULL, 10);
	*s = 0;

	__add_preferred_console(buf, idx, options, brl_options);
	console_set_on_cmdline = 1;
	return 1;
}
__setup("console=", console_setup);

/**
 * add_preferred_console - add a device to the list of preferred consoles.
 * @name: device name
 * @idx: device index
 * @options: options for this console
2020 2021 2022 2023 2024 2025 2026 2027
 *
 * The last preferred console added will be used for kernel messages
 * and stdin/out/err for init.  Normally this is used by console_setup
 * above to handle user-supplied console arguments; however it can also
 * be used by arch-specific code either to override the user or more
 * commonly to provide a default console (ie from PROM variables) when
 * the user has not supplied one.
 */
int add_preferred_console(char *name, int idx, char *options)
{
	return __add_preferred_console(name, idx, options, NULL);
}
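/*
 * Illustrative usage sketch (hypothetical arch/platform code): provide a
 * default console when the user supplied no "console=" argument, e.g.
 *
 *	if (!console_set_on_cmdline)
 *		add_preferred_console("ttyS", 0, "115200n8");
 */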

bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);

static int __init console_suspend_disable(char *str)
{
	console_suspend_enabled = false;
	return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
		bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
	" and hibernate operations");

/**
 * suspend_console - suspend the console subsystem
 *
 * This disables printk() while we go into suspend states
 */
void suspend_console(void)
{
	if (!console_suspend_enabled)
		return;
	printk("Suspending console(s) (use no_console_suspend to debug)\n");
	console_lock();
	console_suspended = 1;
	up_console_sem();
}

void resume_console(void)
{
	if (!console_suspend_enabled)
		return;
	down_console_sem();
	console_suspended = 0;
	console_unlock();
}

/**
 * console_cpu_notify - print deferred console messages after CPU hotplug
 * @self: notifier struct
 * @action: CPU hotplug event
 * @hcpu: unused
 *
 * If printk() is called from a CPU that is not online yet, the messages
 * will be spooled but will not show up on the console.  This function is
 * called when a new CPU comes online (or fails to come up), and ensures
 * that any such output gets printed.
 */
static int console_cpu_notify(struct notifier_block *self,
	unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_DOWN_FAILED:
	case CPU_UP_CANCELED:
		console_lock();
		console_unlock();
	}
	return NOTIFY_OK;
}

/**
 * console_lock - lock the console system for exclusive use.
 *
 * Acquires a lock which guarantees that the caller has
 * exclusive access to the console system and the console_drivers list.
 *
 * Can sleep, returns nothing.
 */
void console_lock(void)
{
	might_sleep();

	down_console_sem();
	if (console_suspended)
		return;
	console_locked = 1;
	console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);

/**
 * console_trylock - try to lock the console system for exclusive use.
 *
 * Try to acquire a lock which guarantees that the caller has exclusive
 * access to the console system and the console_drivers list.
 *
 * returns 1 on success, and 0 on failure to acquire the lock.
 */
int console_trylock(void)
{
	if (down_trylock_console_sem())
		return 0;
	if (console_suspended) {
		up_console_sem();
		return 0;
	}
	console_locked = 1;
	/*
	 * When PREEMPT_COUNT disabled we can't reliably detect if it's
	 * safe to schedule (e.g. calling printk while holding a spin_lock),
	 * because preempt_disable()/preempt_enable() are just barriers there
	 * and preempt_count() is always 0.
	 *
	 * RCU read sections have a separate preemption counter when
	 * PREEMPT_RCU enabled thus we must take extra care and check
	 * rcu_preempt_depth(), otherwise RCU read sections modify
	 * preempt_count().
	 */
	console_may_schedule = !oops_in_progress &&
			preemptible() &&
			!rcu_preempt_depth();
	return 1;
}
EXPORT_SYMBOL(console_trylock);

int is_console_locked(void)
{
	return console_locked;
}

/*
 * Check if we have any console that is capable of printing while cpu is
 * booting or shutting down. Requires console_sem.
 */
static int have_callable_console(void)
{
	struct console *con;

	for_each_console(con)
		if ((con->flags & CON_ENABLED) &&
				(con->flags & CON_ANYTIME))
			return 1;

	return 0;
}

/*
 * Can we actually use the console at this time on this cpu?
 *
 * Console drivers may assume that per-cpu resources have been allocated. So
 * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
 * call them until this CPU is officially up.
 */
static inline int can_use_console(void)
{
	return cpu_online(raw_smp_processor_id()) || have_callable_console();
}

static void console_cont_flush(char *text, size_t size)
{
	unsigned long flags;
	size_t len;

	raw_spin_lock_irqsave(&logbuf_lock, flags);

	if (!cont.len)
		goto out;

	if (suppress_message_printing(cont.level)) {
		cont.cons = cont.len;
		if (cont.flushed)
			cont.len = 0;
		goto out;
	}

	/*
	 * We still queue earlier records, likely because the console was
	 * busy. The earlier ones need to be printed before this one, we
	 * did not flush any fragment so far, so just let it queue up.
	 */
	if (console_seq < log_next_seq && !cont.cons)
		goto out;

	len = cont_print_text(text, size);
	raw_spin_unlock(&logbuf_lock);
	stop_critical_timings();
	call_console_drivers(cont.level, NULL, 0, text, len);
	start_critical_timings();
	local_irq_restore(flags);
	return;
out:
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}

/**
 * console_unlock - unlock the console system
 *
 * Releases the console_lock which the caller holds on the console system
 * and the console driver list.
 *
 * While the console_lock was held, console output may have been buffered
 * by printk().  If this is the case, console_unlock(); emits
 * the output prior to releasing the lock.
 *
 * If there is output waiting, we wake /dev/kmsg and syslog() users.
 *
 * console_unlock(); may be called from any context.
 */
void console_unlock(void)
{
	static char ext_text[CONSOLE_EXT_LOG_MAX];
	static char text[LOG_LINE_MAX + PREFIX_MAX];
	static u64 seen_seq;
	unsigned long flags;
	bool wake_klogd = false;
	bool do_cond_resched, retry;

	if (console_suspended) {
		up_console_sem();
		return;
	}

	/*
	 * Console drivers are called under logbuf_lock, so
	 * @console_may_schedule should be cleared before; however, we may
	 * end up dumping a lot of lines, for example, if called from
	 * console registration path, and should invoke cond_resched()
	 * between lines if allowable.  Not doing so can cause a very long
	 * scheduling stall on a slow console leading to RCU stall and
	 * softlockup warnings which exacerbate the issue with more
	 * messages practically incapacitating the system.
	 */
	do_cond_resched = console_may_schedule;
	console_may_schedule = 0;

again:
	/*
	 * We released the console_sem lock, so we need to recheck if
	 * cpu is online and (if not) is there at least one CON_ANYTIME
	 * console.
	 */
	if (!can_use_console()) {
		console_locked = 0;
		up_console_sem();
		return;
	}

	/* flush buffered message fragment immediately to console */
	console_cont_flush(text, sizeof(text));

	for (;;) {
		struct printk_log *msg;
		size_t ext_len = 0;
		size_t len;
		int level;

		raw_spin_lock_irqsave(&logbuf_lock, flags);
		if (seen_seq != log_next_seq) {
			wake_klogd = true;
			seen_seq = log_next_seq;
		}

		if (console_seq < log_first_seq) {
			len = sprintf(text, "** %u printk messages dropped ** ",
				      (unsigned)(log_first_seq - console_seq));

			/* messages are gone, move to first one */
			console_seq = log_first_seq;
			console_idx = log_first_idx;
			console_prev = 0;
		} else {
			len = 0;
		}
skip:
		if (console_seq == log_next_seq)
			break;

		msg = log_from_idx(console_idx);
		level = msg->level;
		if ((msg->flags & LOG_NOCONS) ||
				suppress_message_printing(level)) {
			/*
			 * Skip record we have buffered and already printed
			 * directly to the console when we received it, and
			 * record that has level above the console loglevel.
			 */
			console_idx = log_next(console_idx);
			console_seq++;
			/*
			 * We will get here again when we register a new
			 * CON_PRINTBUFFER console. Clear the flag so we
			 * will properly dump everything later.
			 */
			msg->flags &= ~LOG_NOCONS;
			console_prev = msg->flags;
			goto skip;
		}

		len += msg_print_text(msg, console_prev, false,
				      text + len, sizeof(text) - len);
		if (nr_ext_console_drivers) {
			ext_len = msg_print_ext_header(ext_text,
						sizeof(ext_text),
						msg, console_seq, console_prev);
			ext_len += msg_print_ext_body(ext_text + ext_len,
						sizeof(ext_text) - ext_len,
						log_dict(msg), msg->dict_len,
						log_text(msg), msg->text_len);
		}
		console_idx = log_next(console_idx);
		console_seq++;
		console_prev = msg->flags;
		raw_spin_unlock(&logbuf_lock);

		stop_critical_timings();	/* don't trace print latency */
		call_console_drivers(level, ext_text, ext_len, text, len);
		start_critical_timings();
		local_irq_restore(flags);

		if (do_cond_resched)
			cond_resched();
	}
	console_locked = 0;

	/* Release the exclusive_console once it is used */
	if (unlikely(exclusive_console))
		exclusive_console = NULL;

	raw_spin_unlock(&logbuf_lock);

	up_console_sem();

	/*
	 * Someone could have filled up the buffer again, so re-check if there's
	 * something to flush. In case we cannot trylock the console_sem again,
	 * there's a new owner and the console_unlock() from them will do the
	 * flush, no worries.
	 */
	raw_spin_lock(&logbuf_lock);
	retry = console_seq != log_next_seq;
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	if (retry && console_trylock())
		goto again;

	if (wake_klogd)
		wake_up_klogd();
}
EXPORT_SYMBOL(console_unlock);
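/*
 * Illustrative usage sketch: code that needs a stable view of the console
 * list takes the lock, walks the list, and lets console_unlock() flush
 * anything printk() buffered in the meantime:
 *
 *	struct console *con;
 *
 *	console_lock();
 *	for_each_console(con)
 *		pr_info("registered console: %s%d\n", con->name, con->index);
 *	console_unlock();
 */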

/**
 * console_conditional_schedule - yield the CPU if required
 *
 * If the console code is currently allowed to sleep, and
 * if this CPU should yield the CPU to another task, do
 * so here.
 *
 * Must be called within console_lock();.
 */
void __sched console_conditional_schedule(void)
{
	if (console_may_schedule)
		cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);

void console_unblank(void)
{
	struct console *c;

	/*
	 * console_unblank can no longer be called in interrupt context unless
	 * oops_in_progress is set to 1.
	 */
	if (oops_in_progress) {
		if (down_trylock_console_sem() != 0)
			return;
	} else
		console_lock();

	console_locked = 1;
	console_may_schedule = 0;
	for_each_console(c)
		if ((c->flags & CON_ENABLED) && c->unblank)
			c->unblank();
	console_unlock();
}

/**
 * console_flush_on_panic - flush console content on panic
 *
 * Immediately output all pending messages no matter what.
 */
void console_flush_on_panic(void)
{
	/*
	 * If someone else is holding the console lock, trylock will fail
	 * and may_schedule may be set.  Ignore and proceed to unlock so
	 * that messages are flushed out.  As this can be called from any
	 * context and we don't want to get preempted while flushing,
	 * ensure may_schedule is cleared.
	 */
	console_trylock();
	console_may_schedule = 0;
	console_unlock();
}

/*
 * Return the console tty driver structure and its associated index
 */
struct tty_driver *console_device(int *index)
{
	struct console *c;
	struct tty_driver *driver = NULL;

	console_lock();
	for_each_console(c) {
		if (!c->device)
			continue;
		driver = c->device(c, index);
		if (driver)
			break;
	}
	console_unlock();
	return driver;
}

/*
 * Prevent further output on the passed console device so that (for example)
 * serial drivers can disable console output before suspending a port, and can
 * re-enable output afterwards.
 */
void console_stop(struct console *console)
{
	console_lock();
	console->flags &= ~CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_stop);

void console_start(struct console *console)
{
	console_lock();
	console->flags |= CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_start);

static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 *  - Once a "real" console is registered, any attempt to register a
 *    bootconsoles will be rejected
 */
void register_console(struct console *newcon)
{
	int i;
	unsigned long flags;
	struct console *bcon = NULL;
	struct console_cmdline *c;

	if (console_drivers)
		for_each_console(bcon)
			if (WARN(bcon == newcon,
					"console '%s%d' already registered\n",
					bcon->name, bcon->index))
				return;

	/*
	 * before we register a new CON_BOOT console, make sure we don't
	 * already have a valid console
	 */
	if (console_drivers && newcon->flags & CON_BOOT) {
		/* find the last or real console */
		for_each_console(bcon) {
			if (!(bcon->flags & CON_BOOT)) {
				pr_info("Too late to register bootconsole %s%d\n",
					newcon->name, newcon->index);
				return;
			}
		}
	}

	if (console_drivers && console_drivers->flags & CON_BOOT)
		bcon = console_drivers;

	if (preferred_console < 0 || bcon || !console_drivers)
		preferred_console = selected_console;

	/*
	 *	See if we want to use this console driver. If we
	 *	didn't select a console we take the first one
	 *	that registers here.
	 */
	if (preferred_console < 0) {
		if (newcon->index < 0)
			newcon->index = 0;
		if (newcon->setup == NULL ||
		    newcon->setup(newcon, NULL) == 0) {
			newcon->flags |= CON_ENABLED;
			if (newcon->device) {
				newcon->flags |= CON_CONSDEV;
				preferred_console = 0;
			}
		}
	}

	/*
	 *	See if this console matches one we selected on
	 *	the command line.
	 */
	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && c->name[0];
	     i++, c++) {
		if (!newcon->match ||
		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
			/* default matching */
			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
			if (strcmp(c->name, newcon->name) != 0)
				continue;
			if (newcon->index >= 0 &&
			    newcon->index != c->index)
				continue;
			if (newcon->index < 0)
				newcon->index = c->index;

			if (_braille_register_console(newcon, c))
				return;

			if (newcon->setup &&
			    newcon->setup(newcon, c->options) != 0)
				break;
		}

		newcon->flags |= CON_ENABLED;
		if (i == selected_console) {
			newcon->flags |= CON_CONSDEV;
			preferred_console = selected_console;
		}
		break;
	}

	if (!(newcon->flags & CON_ENABLED))
		return;

	/*
	 * If we have a bootconsole, and are switching to a real console,
	 * don't print everything out again, since when the boot console, and
	 * the real console are the same physical device, it's annoying to
	 * see the beginning boot messages twice
	 */
	if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
		newcon->flags &= ~CON_PRINTBUFFER;

	/*
	 *	Put this console in the list - keep the
	 *	preferred driver at the head of the list.
	 */
	console_lock();
	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
		newcon->next = console_drivers;
		console_drivers = newcon;
		if (newcon->next)
			newcon->next->flags &= ~CON_CONSDEV;
	} else {
		newcon->next = console_drivers->next;
		console_drivers->next = newcon;
	}

	if (newcon->flags & CON_EXTENDED)
		if (!nr_ext_console_drivers++)
			pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n");

	if (newcon->flags & CON_PRINTBUFFER) {
		/*
		 * console_unlock(); will print out the buffered messages
		 * for us.
		 */
		raw_spin_lock_irqsave(&logbuf_lock, flags);
		console_seq = syslog_seq;
		console_idx = syslog_idx;
		console_prev = syslog_prev;
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
		/*
		 * We're about to replay the log buffer.  Only do this to the
		 * just-registered console to avoid excessive message spam to
		 * the already-registered consoles.
		 */
		exclusive_console = newcon;
	}
	console_unlock();
	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc - this is to ensure that end
	 * users know there might be something in the kernel's log buffer that
	 * went to the bootconsole (that they do not see on the real console)
	 */
	pr_info("%sconsole [%s%d] enabled\n",
		(newcon->flags & CON_BOOT) ? "boot" : "" ,
		newcon->name, newcon->index);
	if (bcon &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		/* We need to iterate through all boot consoles, to make
		 * sure we print everything out, before we unregister them.
		 */
		for_each_console(bcon)
			if (bcon->flags & CON_BOOT)
				unregister_console(bcon);
	}
}
EXPORT_SYMBOL(register_console);
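/*
 * Illustrative usage sketch (hypothetical driver code): a minimal console
 * driver fills in a struct console and registers it.  The names
 * "example_write" and "example_console" exist only for this example.
 *
 *	static void example_write(struct console *con, const char *s,
 *				  unsigned int count)
 *	{
 *		// push 'count' bytes of 's' to the hardware
 *	}
 *
 *	static struct console example_console = {
 *		.name	= "example",
 *		.write	= example_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&example_console);
 */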

int unregister_console(struct console *console)
{
	struct console *a, *b;
	int res;

	pr_info("%sconsole [%s%d] disabled\n",
		(console->flags & CON_BOOT) ? "boot" : "" ,
		console->name, console->index);

2676 2677 2678
	res = _braille_unregister_console(console);
	if (res)
		return res;

	res = 1;
	console_lock();
	if (console_drivers == console) {
		console_drivers=console->next;
		res = 0;
	} else if (console_drivers) {
		for (a=console_drivers->next, b=console_drivers ;
		     a; b=a, a=b->next) {
			if (a == console) {
				b->next = a->next;
				res = 0;
				break;
			}
		}
	}

	if (!res && (console->flags & CON_EXTENDED))
		nr_ext_console_drivers--;

	/*
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 */
	if (console_drivers != NULL && console->flags & CON_CONSDEV)
		console_drivers->flags |= CON_CONSDEV;

	console->flags &= ~CON_ENABLED;
	console_unlock();
	console_sysfs_notify();
	return res;
}
EXPORT_SYMBOL(unregister_console);

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that code exists elsewhere to get
 * rid of the boot console as soon as the proper console shows up, so there
 * won't be side-effects from postponing the removal.
 */
static int __init printk_late_init(void)
{
	struct console *con;

	for_each_console(con) {
		if (!keep_bootcon && con->flags & CON_BOOT) {
			/*
			 * Make sure to unregister boot consoles whose data
			 * resides in the init section before the init section
			 * is discarded. Boot consoles whose data will stick
			 * around will automatically be unregistered when the
			 * proper console replaces them.
			 */
			if (init_section_intersects(con, sizeof(*con)))
				unregister_console(con);
		}
	}
	hotcpu_notifier(console_cpu_notify, 0);
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = __this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		/* If trylock fails, someone else is doing the printing */
		if (console_trylock())
			console_unlock();
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func = wake_up_klogd_work_func,
	.flags = IRQ_WORK_LAZY,
};

void wake_up_klogd(void)
{
	preempt_disable();
	if (waitqueue_active(&log_wait)) {
		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

int printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	preempt_disable();
	va_start(args, fmt);
	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
	va_end(args);

	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	preempt_enable();

	return r;
}
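/*
 * Illustrative usage sketch: code running under the scheduler's locks
 * (where calling up() on console_sem is unsafe) logs via printk_deferred()
 * and the message is flushed later from irq_work context, e.g.
 *
 *	printk_deferred("sched: runqueue imbalance on CPU%d\n", cpu);
 *
 * "cpu" is a hypothetical variable used only for the example.
 */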

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
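/*
 * Illustrative usage sketch: callers normally use the printk_ratelimit()
 * wrapper, which passes __func__ to __printk_ratelimit() and shares the
 * global printk_ratelimit_state:
 *
 *	if (printk_ratelimit())
 *		printk(KERN_NOTICE "dropping packet from %pI4\n", &saddr);
 *
 * "saddr" is a hypothetical variable used only for the example.
 */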

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			unsigned int interval_msecs)
{
	unsigned long elapsed = jiffies - *caller_jiffies;

	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
		return false;

	*caller_jiffies = jiffies;
	return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
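/*
 * Illustrative usage sketch: each call site keeps its own jiffies stamp, so
 * different messages are throttled independently:
 *
 *	static unsigned long last_warned;
 *
 *	if (printk_timed_ratelimit(&last_warned, 60 * 1000))
 *		pr_warn("firmware still not responding\n");
 *
 * "last_warned" is a hypothetical variable used only for the example.
 */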

static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure will be called when the kernel oopses or panics and must be
 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
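/*
 * Illustrative usage sketch (hypothetical dumper code): a dumper provides a
 * dump() callback and pulls records with kmsg_dump_get_buffer() or
 * kmsg_dump_get_line().  "example_dump" and "example_dumper" exist only for
 * this example.
 *
 *	static char dump_buf[PAGE_SIZE];
 *
 *	static void example_dump(struct kmsg_dumper *dumper,
 *				 enum kmsg_dump_reason reason)
 *	{
 *		size_t len;
 *
 *		if (kmsg_dump_get_buffer(dumper, true, dump_buf,
 *					 sizeof(dump_buf), &len))
 *			; // write 'len' bytes of dump_buf to persistent storage
 *	}
 *
 *	static struct kmsg_dumper example_dumper = {
 *		.dump	    = example_dump,
 *		.max_reason = KMSG_DUMP_OOPS,
 *	};
 *
 *	kmsg_dump_register(&example_dumper);
 */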

/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

/**
 * kmsg_dump - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 *
 * Call each of the registered dumper's dump() callback, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump(enum kmsg_dump_reason reason)
{
	struct kmsg_dumper *dumper;
	unsigned long flags;

	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		if (dumper->max_reason && reason > dumper->max_reason)
			continue;

		/* initialize iterator with data about the stored records */
		dumper->active = true;

		raw_spin_lock_irqsave(&logbuf_lock, flags);
		dumper->cur_seq = clear_seq;
		dumper->cur_idx = clear_idx;
		dumper->next_seq = log_next_seq;
		dumper->next_idx = log_next_idx;
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);

		/* invoke dumper which will iterate over records */
		dumper->dump(dumper, reason);

		/* reset iterator */
		dumper->active = false;
	}
	rcu_read_unlock();
}

/**
 * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
2958 2959
 *
 * The function is similar to kmsg_dump_get_line(), but grabs no locks.
2960
 */
2961 2962
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
			       char *line, size_t size, size_t *len)
2963
{
2964
	struct printk_log *msg;
2965 2966 2967 2968 2969
	size_t l = 0;
	bool ret = false;

	if (!dumper->active)
		goto out;
2970

2971 2972 2973 2974 2975
	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}
2976

2977
	/* last entry */
2978
	if (dumper->cur_seq >= log_next_seq)
2979
		goto out;
2980

2981
	msg = log_from_idx(dumper->cur_idx);
2982
	l = msg_print_text(msg, 0, syslog, line, size);
2983 2984 2985 2986 2987 2988 2989 2990 2991

	dumper->cur_idx = log_next(dumper->cur_idx);
	dumper->cur_seq++;
	ret = true;
out:
	if (len)
		*len = l;
	return ret;
}
2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
			char *line, size_t size, size_t *len)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records that fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
			  char *buf, size_t size, size_t *len)
{
	unsigned long flags;
	u64 seq;
	u32 idx;
	u64 next_seq;
	u32 next_idx;
	enum log_flags prev;
	size_t l = 0;
	bool ret = false;

	if (!dumper->active)
		goto out;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}

	/* last entry */
	if (dumper->cur_seq >= dumper->next_seq) {
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
		goto out;
	}

	/* calculate length of entire buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	prev = 0;
	while (seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l += msg_print_text(msg, prev, true, NULL, 0);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	/* move first record forward until length fits into the buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	prev = 0;
	while (l > size && seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l -= msg_print_text(msg, prev, true, NULL, 0);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	/* last message in next iteration */
	next_seq = seq;
	next_idx = idx;

	l = 0;
	while (seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l += msg_print_text(msg, prev, syslog, buf + l, size - l);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	dumper->next_seq = next_seq;
	dumper->next_idx = next_idx;
	ret = true;
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);

/**
 * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 *
 * The function is similar to kmsg_dump_rewind(), but grabs no locks.
 */
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
	dumper->cur_seq = clear_seq;
	dumper->cur_idx = clear_idx;
	dumper->next_seq = log_next_seq;
	dumper->next_idx = log_next_idx;
}

/**
 * kmsg_dump_rewind - reset the iterator
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	kmsg_dump_rewind_nolock(dumper);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

static char dump_stack_arch_desc_str[128];

/**
 * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
 * @fmt: printf-style format string
 * @...: arguments for the format string
 *
 * The configured string will be printed right after utsname during task
 * dumps.  Usually used to add arch-specific system identifiers.  If an
 * arch wants to make use of such an ID string, it should initialize this
 * as soon as possible during boot.
 */
void __init dump_stack_set_arch_desc(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
		  fmt, args);
	va_end(args);
}

/**
 * dump_stack_print_info - print generic debug info for dump_stack()
 * @log_lvl: log level
 *
 * Arch-specific dump_stack() implementations can use this function to
 * print out the same debug information as the generic dump_stack().
 */
void dump_stack_print_info(const char *log_lvl)
{
	printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
	       log_lvl, raw_smp_processor_id(), current->pid, current->comm,
	       print_tainted(), init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);

	if (dump_stack_arch_desc_str[0] != '\0')
		printk("%sHardware name: %s\n",
		       log_lvl, dump_stack_arch_desc_str);

	print_worker_info(log_lvl, current);
}

/**
 * show_regs_print_info - print generic debug info for show_regs()
 * @log_lvl: log level
 *
 * show_regs() implementations can use this function to print out generic
 * debug information.
 */
void show_regs_print_info(const char *log_lvl)
{
	dump_stack_print_info(log_lvl);

	printk("%stask: %p task.stack: %p\n",
	       log_lvl, current, task_stack_page(current));
}

#endif