Commit c8dbea6d
Merge branch 'printk-rework' into for-linus
Authored Apr 27, 2021 by Petr Mladek
Parents: da34b03f, acebb559

Showing 11 changed files with 316 additions and 309 deletions (+316 -309)
arch/powerpc/kernel/nvram_64.c   +5   -3
arch/powerpc/xmon/xmon.c         +3   -3
arch/um/kernel/kmsg_dump.c       +12  -1
drivers/hv/vmbus_drv.c           +3   -1
drivers/mtd/mtdoops.c            +15  -2
fs/pstore/platform.c             +4   -1
include/linux/kmsg_dump.h        +17  -30
kernel/debug/kdb/kdb_main.c      +5   -5
kernel/printk/internal.h         +1   -6
kernel/printk/printk.c           +240 -238
kernel/printk/printk_safe.c      +11  -19
arch/powerpc/kernel/nvram_64.c
@@ -647,6 +647,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 {
 	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
 	static unsigned int oops_count = 0;
+	static struct kmsg_dump_iter iter;
 	static bool panicking = false;
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
@@ -681,13 +682,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 		return;
 
 	if (big_oops_buf) {
-		kmsg_dump_get_buffer(dumper, false,
+		kmsg_dump_rewind(&iter);
+		kmsg_dump_get_buffer(&iter, false,
 				big_oops_buf, big_oops_buf_sz, &text_len);
 		rc = zip_oops(text_len);
 	}
 	if (rc != 0) {
-		kmsg_dump_rewind(dumper);
-		kmsg_dump_get_buffer(dumper, false,
+		kmsg_dump_rewind(&iter);
+		kmsg_dump_get_buffer(&iter, false,
 				oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
 		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
arch/powerpc/xmon/xmon.c
@@ -3005,7 +3005,7 @@ print_address(unsigned long addr)
 static void
 dump_log_buf(void)
 {
-	struct kmsg_dumper dumper = { .active = 1 };
+	struct kmsg_dump_iter iter;
 	unsigned char buf[128];
 	size_t len;
@@ -3017,9 +3017,9 @@ dump_log_buf(void)
 	catch_memory_errors = 1;
 	sync();
 
-	kmsg_dump_rewind_nolock(&dumper);
+	kmsg_dump_rewind(&iter);
 	xmon_start_pagination();
-	while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+	while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) {
 		buf[len] = '\0';
 		printf("%s", buf);
 	}
arch/um/kernel/kmsg_dump.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kmsg_dump.h>
+#include <linux/spinlock.h>
 #include <linux/console.h>
 #include <linux/string.h>
 #include <shared/init.h>
@@ -9,8 +10,11 @@
 static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
 				enum kmsg_dump_reason reason)
 {
+	static struct kmsg_dump_iter iter;
+	static DEFINE_SPINLOCK(lock);
 	static char line[1024];
 	struct console *con;
+	unsigned long flags;
 	size_t len = 0;
 
 	/* only dump kmsg when no console is available */
@@ -29,11 +33,18 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
 	if (con)
 		return;
 
+	if (!spin_trylock_irqsave(&lock, flags))
+		return;
+
+	kmsg_dump_rewind(&iter);
+
 	printf("kmsg_dump:\n");
-	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
+	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
 		line[len] = '\0';
 		printf("%s", line);
 	}
+
+	spin_unlock_irqrestore(&lock, flags);
 }
 
 static struct kmsg_dumper kmsg_dumper = {
drivers/hv/vmbus_drv.c
@@ -1391,6 +1391,7 @@ static void vmbus_isr(void)
 static void hv_kmsg_dump(struct kmsg_dumper *dumper,
 			 enum kmsg_dump_reason reason)
 {
+	struct kmsg_dump_iter iter;
 	size_t bytes_written;
 	phys_addr_t panic_pa;
@@ -1404,7 +1405,8 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
 	 * Write dump contents to the page. No need to synchronize; panic should
 	 * be single-threaded.
 	 */
-	kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+	kmsg_dump_rewind(&iter);
+	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
 			     &bytes_written);
 	if (bytes_written)
 		hyperv_report_panic_msg(panic_pa, bytes_written);
drivers/mtd/mtdoops.c
@@ -52,6 +52,7 @@ static struct mtdoops_context {
 	int nextcount;
 	unsigned long *oops_page_used;
 
+	unsigned long oops_buf_busy;
 	void *oops_buf;
 } oops_cxt;
@@ -180,6 +181,9 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 	u32 *hdr;
 	int ret;
 
+	if (test_and_set_bit(0, &cxt->oops_buf_busy))
+		return;
+
 	/* Add mtdoops header to the buffer */
 	hdr = cxt->oops_buf;
 	hdr[0] = cxt->nextcount;
@@ -190,7 +194,7 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 			      record_size, &retlen, cxt->oops_buf);
 		if (ret == -EOPNOTSUPP) {
 			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
-			return;
+			goto out;
 		}
 	} else
 		ret = mtd_write(mtd, cxt->nextpage * record_size,
@@ -203,6 +207,8 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 	memset(cxt->oops_buf, 0xff, record_size);
 
 	mtdoops_inc_counter(cxt);
+out:
+	clear_bit(0, &cxt->oops_buf_busy);
 }
 
 static void mtdoops_workfunc_write(struct work_struct *work)
@@ -271,13 +277,19 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
 {
 	struct mtdoops_context *cxt = container_of(dumper,
 			struct mtdoops_context, dump);
+	struct kmsg_dump_iter iter;
 
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
 
-	kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+	kmsg_dump_rewind(&iter);
+
+	if (test_and_set_bit(0, &cxt->oops_buf_busy))
+		return;
+	kmsg_dump_get_buffer(&iter, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
 			     record_size - MTDOOPS_HEADER_SIZE, NULL);
+	clear_bit(0, &cxt->oops_buf_busy);
 
 	if (reason != KMSG_DUMP_OOPS) {
 		/* Panics must be written immediately */
@@ -394,6 +406,7 @@ static int __init mtdoops_init(void)
 		return -ENOMEM;
 	}
 	memset(cxt->oops_buf, 0xff, record_size);
+	cxt->oops_buf_busy = 0;
 
 	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
 	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
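Note (illustrative aside, not part of the commit): the new oops_buf_busy word gives mtdoops a lock-free guard so the panic-time dumper and the deferred write worker never touch oops_buf at the same time; whoever loses test_and_set_bit() simply backs off. The generic shape of that guard looks like this (buffer_busy and fill_buffer are invented names for the example):

#include <linux/bitops.h>

static unsigned long buffer_busy;	/* bit 0 = buffer owned by someone */

static void fill_buffer(char *buf, size_t size)
{
	if (test_and_set_bit(0, &buffer_busy))
		return;			/* another context owns the buffer; skip */

	/* ... write up to size bytes into buf ... */

	clear_bit(0, &buffer_busy);	/* release the buffer for the next user */
}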
fs/pstore/platform.c
@@ -385,6 +385,7 @@ void pstore_record_init(struct pstore_record *record,
 static void pstore_dump(struct kmsg_dumper *dumper,
 			enum kmsg_dump_reason reason)
 {
+	struct kmsg_dump_iter iter;
 	unsigned long	total = 0;
 	const char	*why;
 	unsigned int	part = 1;
@@ -405,6 +406,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 		}
 	}
 
+	kmsg_dump_rewind(&iter);
+
 	oopscount++;
 	while (total < kmsg_bytes) {
 		char *dst;
@@ -435,7 +438,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 		dst_size -= header_size;
 
 		/* Write dump contents. */
-		if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
+		if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
 					  dst_size, &dump_size))
 			break;
include/linux/kmsg_dump.h
@@ -29,6 +29,16 @@ enum kmsg_dump_reason {
 	KMSG_DUMP_MAX
 };
 
+/**
+ * struct kmsg_dump_iter - iterator for retrieving kernel messages
+ * @cur_seq:	Points to the oldest message to dump
+ * @next_seq:	Points after the newest message to dump
+ */
+struct kmsg_dump_iter {
+	u64	cur_seq;
+	u64	next_seq;
+};
+
 /**
  * struct kmsg_dumper - kernel crash message dumper structure
  * @list:	Entry in the dumper list (private)
@@ -41,31 +51,19 @@ struct kmsg_dumper {
 	struct list_head list;
 	void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
 	enum kmsg_dump_reason max_reason;
-	bool active;
 	bool registered;
-
-	/* private state of the kmsg iterator */
-	u32 cur_idx;
-	u32 next_idx;
-	u64 cur_seq;
-	u64 next_seq;
 };
 
 #ifdef CONFIG_PRINTK
 void kmsg_dump(enum kmsg_dump_reason reason);
 
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
-			       char *line, size_t size, size_t *len);
-
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
 			char *line, size_t size, size_t *len);
 
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
-			  char *buf, size_t size, size_t *len);
-
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+			  char *buf, size_t size, size_t *len_out);
 
-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
 
 int kmsg_dump_register(struct kmsg_dumper *dumper);
@@ -77,30 +75,19 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
 {
 }
 
-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
-					     bool syslog, const char *line,
-					     size_t size, size_t *len)
-{
-	return false;
-}
-
-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
 				const char *line, size_t size, size_t *len)
 {
 	return false;
 }
 
-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
 					char *buf, size_t size, size_t *len)
 {
 	return false;
}
 
-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-}
-
-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 {
 }
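Note (illustrative aside): with the iterator split out of struct kmsg_dumper, a dump() callback now owns its cursor explicitly (on the stack, or in static storage for panic paths) instead of relying on state that kmsg_dump() used to seed into the dumper. Written against the new interface, a minimal dumper might look like this sketch (my_dump, my_buf and my_dumper are invented for the example):

#include <linux/kmsg_dump.h>

static char my_buf[1024];

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	size_t len;

	kmsg_dump_rewind(&iter);	/* start at the oldest retained record */
	while (kmsg_dump_get_line(&iter, true, my_buf, sizeof(my_buf), &len)) {
		/* ... persist len bytes of my_buf somewhere safe ... */
	}
}

static struct kmsg_dumper my_dumper = {
	.dump		= my_dump,
	.max_reason	= KMSG_DUMP_OOPS,
};

/* registered once during init: kmsg_dump_register(&my_dumper); */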
kernel/debug/kdb/kdb_main.c
@@ -2101,7 +2101,7 @@ static int kdb_dmesg(int argc, const char **argv)
 	int adjust = 0;
 	int n = 0;
 	int skip = 0;
-	struct kmsg_dumper dumper = { .active = 1 };
+	struct kmsg_dump_iter iter;
 	size_t len;
 	char buf[201];
@@ -2126,8 +2126,8 @@ static int kdb_dmesg(int argc, const char **argv)
 		kdb_set(2, setargs);
 	}
 
-	kmsg_dump_rewind_nolock(&dumper);
-	while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
+	kmsg_dump_rewind(&iter);
+	while (kmsg_dump_get_line(&iter, 1, NULL, 0, NULL))
 		n++;
 
 	if (lines < 0) {
@@ -2159,8 +2159,8 @@ static int kdb_dmesg(int argc, const char **argv)
 	if (skip >= n || skip < 0)
 		return 0;
 
-	kmsg_dump_rewind_nolock(&dumper);
-	while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
+	kmsg_dump_rewind(&iter);
+	while (kmsg_dump_get_line(&iter, 1, buf, sizeof(buf), &len)) {
 		if (skip) {
 			skip--;
 			continue;
kernel/printk/internal.h
@@ -12,8 +12,6 @@
 
 #define PRINTK_NMI_CONTEXT_OFFSET	0x010000000
 
-extern raw_spinlock_t logbuf_lock;
-
 __printf(4, 0)
 int vprintk_store(int facility, int level,
 		  const struct dev_printk_info *dev_info,
@@ -21,7 +19,6 @@ int vprintk_store(int facility, int level,
 __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
 __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
-__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
 void __printk_safe_enter(void);
 void __printk_safe_exit(void);
@@ -56,10 +53,8 @@ void defer_console_output(void);
 
 #else
 
-__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
-
 /*
- * In !PRINTK builds we still export logbuf_lock spin_lock, console_sem
+ * In !PRINTK builds we still export console_sem
  * semaphore and some of console functions (console_unlock()/etc.), so
  * printk-safe must preserve the existing local IRQ guarantees.
 */
kernel/printk/printk.c
@@ -262,7 +262,7 @@ static void __up_console_sem(unsigned long ip)
  * definitely not the perfect debug tool (we don't know if _WE_
  * hold it and are racing, but it helps tracking those weird code
  * paths in the console code where we end up in places I want
- * locked without the console sempahore held).
+ * locked without the console semaphore held).
  */
 static int console_locked, console_suspended;
@@ -355,62 +355,50 @@ enum log_flags {
 	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
 };
 
-/*
- * The logbuf_lock protects kmsg buffer, indices, counters.  This can be taken
- * within the scheduler's rq lock. It must be released before calling
- * console_unlock() or anything else that might wake up a process.
- */
-DEFINE_RAW_SPINLOCK(logbuf_lock);
-
-/*
- * Helper macros to lock/unlock logbuf_lock and switch between
- * printk-safe/unsafe modes.
- */
-#define logbuf_lock_irq()				\
-	do {						\
-		printk_safe_enter_irq();		\
-		raw_spin_lock(&logbuf_lock);		\
-	} while (0)
-
-#define logbuf_unlock_irq()				\
-	do {						\
-		raw_spin_unlock(&logbuf_lock);		\
-		printk_safe_exit_irq();			\
-	} while (0)
-
-#define logbuf_lock_irqsave(flags)			\
-	do {						\
-		printk_safe_enter_irqsave(flags);	\
-		raw_spin_lock(&logbuf_lock);		\
-	} while (0)
-
-#define logbuf_unlock_irqrestore(flags)			\
-	do {						\
-		raw_spin_unlock(&logbuf_lock);		\
-		printk_safe_exit_irqrestore(flags);	\
-	} while (0)
+/* syslog_lock protects syslog_* variables and write access to clear_seq. */
+static DEFINE_RAW_SPINLOCK(syslog_lock);
 
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
+/* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static size_t syslog_partial;
 static bool syslog_time;
 
+/* All 3 protected by @console_sem. */
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u64 exclusive_console_stop_seq;
 static unsigned long console_dropped;
 
-/* the next printk record to read after the last 'clear' command */
-static u64 clear_seq;
+struct latched_seq {
+	seqcount_latch_t	latch;
+	u64			val[2];
+};
+
+/*
+ * The next printk record to read after the last 'clear' command. There are
+ * two copies (updated with seqcount_latch) so that reads can locklessly
+ * access a valid value. Writers are synchronized by @syslog_lock.
+ */
+static struct latched_seq clear_seq = {
+	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
+	.val[0]		= 0,
+	.val[1]		= 0,
+};
 
 #ifdef CONFIG_PRINTK_CALLER
 #define PREFIX_MAX		48
 #else
 #define PREFIX_MAX		32
 #endif
-#define LOG_LINE_MAX		(1024 - PREFIX_MAX)
+
+/* the maximum size of a formatted record (i.e. with prefix added per line) */
+#define CONSOLE_LOG_MAX		1024
+
+/* the maximum size allowed to be reserved for a record */
+#define LOG_LINE_MAX		(CONSOLE_LOG_MAX - PREFIX_MAX)
 
 #define LOG_LEVEL(v)		((v) & 0x07)
 #define LOG_FACILITY(v)		((v) >> 3 & 0xff)
@@ -452,6 +440,31 @@ bool printk_percpu_data_ready(void)
 	return __printk_percpu_data_ready;
 }
 
+/* Must be called under syslog_lock. */
+static void latched_seq_write(struct latched_seq *ls, u64 val)
+{
+	raw_write_seqcount_latch(&ls->latch);
+	ls->val[0] = val;
+	raw_write_seqcount_latch(&ls->latch);
+	ls->val[1] = val;
+}
+
+/* Can be called from any context. */
+static u64 latched_seq_read_nolock(struct latched_seq *ls)
+{
+	unsigned int seq;
+	unsigned int idx;
+	u64 val;
+
+	do {
+		seq = raw_read_seqcount_latch(&ls->latch);
+		idx = seq & 0x1;
+		val = ls->val[idx];
+	} while (read_seqcount_latch_retry(&ls->latch, seq));
+
+	return val;
+}
+
 /* Return log buffer address */
 char *log_buf_addr_get(void)
 {
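Note (illustrative aside): latched_seq_write()/latched_seq_read_nolock() are the standard seqcount_latch idiom: the writer, always under syslog_lock, bumps the latch before updating each of the two copies, and a reader in any context (including NMI) picks the copy selected by the low bit of the latch count and retries if a write raced with it. A sketch of how the two sides are meant to be used (the wrapper names here are hypothetical):

/* Writer: only ever runs with syslog_lock held. */
static void clear_seq_set(u64 seq)
{
	raw_spin_lock_irq(&syslog_lock);
	latched_seq_write(&clear_seq, seq);	/* updates val[0], then val[1] */
	raw_spin_unlock_irq(&syslog_lock);
}

/* Reader: lockless, safe from any context. */
static u64 clear_seq_get(void)
{
	return latched_seq_read_nolock(&clear_seq);
}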
@@ -619,7 +632,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
 /* /dev/kmsg - userspace message inject/listen interface */
 struct devkmsg_user {
-	u64 seq;
+	atomic64_t seq;
 	struct ratelimit_state rs;
 	struct mutex lock;
 	char buf[CONSOLE_EXT_LOG_MAX];
@@ -719,27 +732,27 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 
-	logbuf_lock_irq();
-	if (!prb_read_valid(prb, user->seq, r)) {
+	printk_safe_enter_irq();
+	if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
-			logbuf_unlock_irq();
+			printk_safe_exit_irq();
 			goto out;
 		}
 
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		ret = wait_event_interruptible(log_wait,
-				prb_read_valid(prb, user->seq, r));
+				prb_read_valid(prb, atomic64_read(&user->seq), r));
 		if (ret)
 			goto out;
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 	}
 
-	if (r->info->seq != user->seq) {
+	if (r->info->seq != atomic64_read(&user->seq)) {
 		/* our last seen message is gone, return error and reset */
-		user->seq = r->info->seq;
+		atomic64_set(&user->seq, r->info->seq);
 		ret = -EPIPE;
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		goto out;
 	}
@@ -748,8 +761,8 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 				  &r->text_buf[0], r->info->text_len,
 				  &r->info->dev_info);
 
-	user->seq = r->info->seq + 1;
-	logbuf_unlock_irq();
+	atomic64_set(&user->seq, r->info->seq + 1);
+	printk_safe_exit_irq();
 
 	if (len > count) {
 		ret = -EINVAL;
@@ -784,11 +797,11 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 	if (offset)
 		return -ESPIPE;
 
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	switch (whence) {
 	case SEEK_SET:
 		/* the first record */
-		user->seq = prb_first_valid_seq(prb);
+		atomic64_set(&user->seq, prb_first_valid_seq(prb));
 		break;
 	case SEEK_DATA:
 		/*
@@ -796,16 +809,16 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
 		 * changes no global state, and does not clear anything.
 		 */
-		user->seq = clear_seq;
+		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
 		break;
 	case SEEK_END:
 		/* after the last record */
-		user->seq = prb_next_seq(prb);
+		atomic64_set(&user->seq, prb_next_seq(prb));
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 	return ret;
 }
@@ -820,15 +833,15 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &log_wait, wait);
 
-	logbuf_lock_irq();
-	if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
+	printk_safe_enter_irq();
+	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
 		/* return error when data has vanished underneath us */
-		if (info.seq != user->seq)
+		if (info.seq != atomic64_read(&user->seq))
 			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
 		else
 			ret = EPOLLIN|EPOLLRDNORM;
 	}
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 
 	return ret;
 }
@@ -861,9 +874,9 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 	prb_rec_init_rd(&user->record, &user->info,
 			&user->text_buf[0], sizeof(user->text_buf));
 
-	logbuf_lock_irq();
-	user->seq = prb_first_valid_seq(prb);
-	logbuf_unlock_irq();
+	printk_safe_enter_irq();
+	atomic64_set(&user->seq, prb_first_valid_seq(prb));
+	printk_safe_exit_irq();
 
 	file->private_data = user;
 	return 0;
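Note (illustrative aside): with logbuf_lock gone, nothing serializes concurrent users of a /dev/kmsg file handle's read position any more, so the cursor becomes an atomic64_t that is only ever touched through atomic64_read()/atomic64_set(). Roughly, one step of the read loop now behaves like this sketch (devkmsg_fetch_next is a hypothetical helper, not in the patch):

static int devkmsg_fetch_next(struct devkmsg_user *user, struct printk_record *r)
{
	u64 seq = atomic64_read(&user->seq);

	if (!prb_read_valid(prb, seq, r))
		return -EAGAIN;				/* no new record yet */

	if (r->info->seq != seq) {
		/* the record at @seq was overwritten; resync and report it */
		atomic64_set(&user->seq, r->info->seq);
		return -EPIPE;
	}

	atomic64_set(&user->seq, r->info->seq + 1);	/* advance past the record */
	return 0;
}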
@@ -955,6 +968,9 @@ void log_buf_vmcoreinfo_setup(void)
 	VMCOREINFO_SIZE(atomic_long_t);
 	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
+
+	VMCOREINFO_STRUCT_SIZE(latched_seq);
+	VMCOREINFO_OFFSET(latched_seq, val);
 }
 #endif
@@ -1421,6 +1437,50 @@ static size_t get_record_print_text_size(struct printk_info *info,
 	return ((prefix_len * line_count) + info->text_len + 1);
 }
 
+/*
+ * Beginning with @start_seq, find the first record where it and all following
+ * records up to (but not including) @max_seq fit into @size.
+ *
+ * @max_seq is simply an upper bound and does not need to exist. If the caller
+ * does not require an upper bound, -1 can be used for @max_seq.
+ */
+static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
+				  bool syslog, bool time)
+{
+	struct printk_info info;
+	unsigned int line_count;
+	size_t len = 0;
+	u64 seq;
+
+	/* Determine the size of the records up to @max_seq. */
+	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
+		if (info.seq >= max_seq)
+			break;
+		len += get_record_print_text_size(&info, line_count, syslog, time);
+	}
+
+	/*
+	 * Adjust the upper bound for the next loop to avoid subtracting
+	 * lengths that were never added.
+	 */
+	if (seq < max_seq)
+		max_seq = seq;
+
+	/*
+	 * Move first record forward until length fits into the buffer. Ignore
+	 * newest messages that were not counted in the above cycle. Messages
+	 * might appear and get lost in the meantime. This is a best effort
+	 * that prevents an infinite loop that could occur with a retry.
+	 */
+	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
+		if (len <= size || info.seq >= max_seq)
+			break;
+		len -= get_record_print_text_size(&info, line_count, syslog, time);
+	}
+
+	return seq;
+}
+
 static int syslog_print(char __user *buf, int size)
 {
 	struct printk_info info;
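Note (illustrative aside): find_first_fitting_seq() is a two-pass scan: first sum the formatted size of every record in the window, then walk forward from the start subtracting records until what is left fits into the caller's buffer. On a plain array of per-record sizes the same computation reduces to this standalone sketch (no @max_seq handling; names invented for the example):

#include <stddef.h>

/* Return the index of the first record such that it and everything
 * after it fits into @size bytes. */
static size_t first_fitting_index(const size_t *rec_size, size_t nr, size_t size)
{
	size_t len = 0;
	size_t i;

	for (i = 0; i < nr; i++)		/* pass 1: total size of all records */
		len += rec_size[i];

	for (i = 0; i < nr && len > size; i++)	/* pass 2: drop from the front */
		len -= rec_size[i];

	return i;				/* oldest record that still fits */
}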
@@ -1428,19 +1488,21 @@ static int syslog_print(char __user *buf, int size)
 	char *text;
 	int len = 0;
 
-	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
 	if (!text)
 		return -ENOMEM;
 
-	prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
+	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
 
 	while (size > 0) {
 		size_t n;
 		size_t skip;
 
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
+		raw_spin_lock(&syslog_lock);
 		if (!prb_read_valid(prb, syslog_seq, &r)) {
-			logbuf_unlock_irq();
+			raw_spin_unlock(&syslog_lock);
+			printk_safe_exit_irq();
 			break;
 		}
 		if (r.info->seq != syslog_seq) {
@@ -1469,7 +1531,8 @@ static int syslog_print(char __user *buf, int size)
 			syslog_partial += n;
 		} else
 			n = 0;
-		logbuf_unlock_irq();
+		raw_spin_unlock(&syslog_lock);
+		printk_safe_exit_irq();
 
 		if (!n)
 			break;
@@ -1492,34 +1555,26 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 static int syslog_print_all(char __user *buf, int size, bool clear)
 {
 	struct printk_info info;
-	unsigned int line_count;
 	struct printk_record r;
 	char *text;
 	int len = 0;
 	u64 seq;
 	bool time;
 
-	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
 	if (!text)
 		return -ENOMEM;
 
 	time = printk_time;
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	/*
 	 * Find first record that fits, including all following records,
 	 * into the user-provided buffer for this dump.
 	 */
-	prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
-		len += get_record_print_text_size(&info, line_count, true, time);
-
-	/* move first record forward until length fits into the buffer */
-	prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
-		if (len <= size)
-			break;
-		len -= get_record_print_text_size(&info, line_count, true, time);
-	}
+	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
				     size, true, time);
 
-	prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
+	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
 
 	len = 0;
 	prb_for_each_record(seq, prb, seq, &r) {
@@ -1532,20 +1587,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			break;
 		}
 
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		if (copy_to_user(buf + len, text, textlen))
 			len = -EFAULT;
 		else
 			len += textlen;
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 
 		if (len < 0)
 			break;
 	}
 
-	if (clear)
-		clear_seq = seq;
-	logbuf_unlock_irq();
+	if (clear) {
+		raw_spin_lock(&syslog_lock);
+		latched_seq_write(&clear_seq, seq);
+		raw_spin_unlock(&syslog_lock);
+	}
+	printk_safe_exit_irq();
 
 	kfree(text);
 	return len;
@@ -1553,9 +1611,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 
 static void syslog_clear(void)
 {
-	logbuf_lock_irq();
-	clear_seq = prb_next_seq(prb);
-	logbuf_unlock_irq();
+	printk_safe_enter_irq();
+	raw_spin_lock(&syslog_lock);
+	latched_seq_write(&clear_seq, prb_next_seq(prb));
+	raw_spin_unlock(&syslog_lock);
+	printk_safe_exit_irq();
+}
+
+/* Return a consistent copy of @syslog_seq. */
+static u64 read_syslog_seq_irq(void)
+{
+	u64 seq;
+
+	raw_spin_lock_irq(&syslog_lock);
+	seq = syslog_seq;
+	raw_spin_unlock_irq(&syslog_lock);
+
+	return seq;
 }
 
 int do_syslog(int type, char __user *buf, int len, int source)
@@ -1581,8 +1653,9 @@ int do_syslog(int type, char __user *buf, int len, int source)
 			return 0;
 		if (!access_ok(buf, len))
 			return -EFAULT;
+
 		error = wait_event_interruptible(log_wait,
-				prb_read_valid(prb, syslog_seq, NULL));
+				prb_read_valid(prb, read_syslog_seq_irq(), NULL));
 		if (error)
 			return error;
 		error = syslog_print(buf, len);
@@ -1630,10 +1703,12 @@ int do_syslog(int type, char __user *buf, int len, int source)
 		break;
 	/* Number of chars in the log buffer */
 	case SYSLOG_ACTION_SIZE_UNREAD:
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
+		raw_spin_lock(&syslog_lock);
 		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
 			/* No unread messages. */
-			logbuf_unlock_irq();
+			raw_spin_unlock(&syslog_lock);
+			printk_safe_exit_irq();
 			return 0;
 		}
 		if (info.seq != syslog_seq) {
@@ -1661,7 +1736,8 @@ int do_syslog(int type, char __user *buf, int len, int source)
 			}
 			error -= syslog_partial;
 		}
-		logbuf_unlock_irq();
+		raw_spin_unlock(&syslog_lock);
+		printk_safe_exit_irq();
 		break;
 	/* Size of the log buffer */
 	case SYSLOG_ACTION_SIZE_BUFFER:
@@ -2104,12 +2180,6 @@ asmlinkage int vprintk_emit(int facility, int level,
 }
 EXPORT_SYMBOL(vprintk_emit);
 
-asmlinkage int vprintk(const char *fmt, va_list args)
-{
-	return vprintk_func(fmt, args);
-}
-EXPORT_SYMBOL(vprintk);
-
 int vprintk_default(const char *fmt, va_list args)
 {
 	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
@@ -2143,7 +2213,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
 	int r;
 
 	va_start(args, fmt);
-	r = vprintk_func(fmt, args);
+	r = vprintk(fmt, args);
 	va_end(args);
 
 	return r;
@@ -2152,8 +2222,7 @@ EXPORT_SYMBOL(printk);
 
 #else /* CONFIG_PRINTK */
 
-#define LOG_LINE_MAX		0
-#define PREFIX_MAX		0
+#define CONSOLE_LOG_MAX		0
 #define printk_time		false
 
 #define prb_read_valid(rb, seq, r)	false
@@ -2262,7 +2331,7 @@ static int __init console_setup(char *str)
 	/*
 	 * console="" or console=null have been suggested as a way to
 	 * disable console output. Use ttynull that has been created
-	 * for exacly this purpose.
+	 * for exactly this purpose.
 	 */
 	if (str[0] == 0 || strcmp(str, "null") == 0) {
 		__add_preferred_console("ttynull", 0, NULL, NULL, true);
@@ -2471,7 +2540,7 @@ static inline int can_use_console(void)
 void console_unlock(void)
 {
 	static char ext_text[CONSOLE_EXT_LOG_MAX];
-	static char text[LOG_LINE_MAX + PREFIX_MAX];
+	static char text[CONSOLE_LOG_MAX];
 	unsigned long flags;
 	bool do_cond_resched, retry;
 	struct printk_info info;
@@ -2518,7 +2587,6 @@ void console_unlock(void)
 		size_t len;
 
 		printk_safe_enter_irqsave(flags);
-		raw_spin_lock(&logbuf_lock);
 skip:
 		if (!prb_read_valid(prb, console_seq, &r))
 			break;
@@ -2562,7 +2630,6 @@ void console_unlock(void)
 				console_msg_format & MSG_FORMAT_SYSLOG,
 				printk_time);
 		console_seq++;
-		raw_spin_unlock(&logbuf_lock);
 
 		/*
 		 * While actively printing out messages, if another printk()
@@ -2589,8 +2656,6 @@ void console_unlock(void)
 
 	console_locked = 0;
 
-	raw_spin_unlock(&logbuf_lock);
-
 	up_console_sem();
 
 	/*
@@ -2599,9 +2664,7 @@ void console_unlock(void)
 	 * there's a new owner and the console_unlock() from them will do the
 	 * flush, no worries.
 	 */
-	raw_spin_lock(&logbuf_lock);
 	retry = prb_read_valid(prb, console_seq, NULL);
-	raw_spin_unlock(&logbuf_lock);
 	printk_safe_exit_irqrestore(flags);
 
 	if (retry && console_trylock())
@@ -2668,9 +2731,9 @@ void console_flush_on_panic(enum con_flush_mode mode)
 	if (mode == CONSOLE_REPLAY_ALL) {
 		unsigned long flags;
 
-		logbuf_lock_irqsave(flags);
+		printk_safe_enter_irqsave(flags);
 		console_seq = prb_first_valid_seq(prb);
-		logbuf_unlock_irqrestore(flags);
+		printk_safe_exit_irqrestore(flags);
 	}
 	console_unlock();
 }
@@ -2898,9 +2961,7 @@ void register_console(struct console *newcon)
 		/*
 		 * console_unlock(); will print out the buffered messages
 		 * for us.
-		 */
-		logbuf_lock_irqsave(flags);
-		/*
+		 *
 		 * We're about to replay the log buffer. Only do this to the
 		 * just-registered console to avoid excessive message spam to
 		 * the already-registered consoles.
@@ -2911,8 +2972,11 @@ void register_console(struct console *newcon)
 		 */
 		exclusive_console = newcon;
 		exclusive_console_stop_seq = console_seq;
+
+		/* Get a consistent copy of @syslog_seq. */
+		raw_spin_lock_irqsave(&syslog_lock, flags);
 		console_seq = syslog_seq;
-		logbuf_unlock_irqrestore(flags);
+		raw_spin_unlock_irqrestore(&syslog_lock, flags);
 	}
 	console_unlock();
 	console_sysfs_notify();
@@ -3042,7 +3106,7 @@ void __init console_init(void)
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
- * get unregistred when the real preferred console is registered.
+ * get unregistered when the real preferred console is registered.
 */
 static int __init printk_late_init(void)
 {
@@ -3276,7 +3340,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
 void kmsg_dump(enum kmsg_dump_reason reason)
 {
 	struct kmsg_dumper *dumper;
-	unsigned long flags;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(dumper, &dump_list, list) {
@@ -3293,26 +3356,15 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 		if (reason > max_reason)
 			continue;
 
-		/* initialize iterator with data about the stored records */
-		dumper->active = true;
-
-		logbuf_lock_irqsave(flags);
-		dumper->cur_seq = clear_seq;
-		dumper->next_seq = prb_next_seq(prb);
-		logbuf_unlock_irqrestore(flags);
-
 		/* invoke dumper which will iterate over records */
 		dumper->dump(dumper, reason);
-
-		/* reset iterator */
-		dumper->active = false;
 	}
 	rcu_read_unlock();
 }
 
 /**
- * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
- * @dumper: registered kmsg dumper
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
@@ -3326,30 +3378,31 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
- *
- * The function is similar to kmsg_dump_get_line(), but grabs no locks.
 */
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
-			       char *line, size_t size, size_t *len)
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
+			char *line, size_t size, size_t *len)
 {
+	u64 min_seq = latched_seq_read_nolock(&clear_seq);
 	struct printk_info info;
 	unsigned int line_count;
 	struct printk_record r;
+	unsigned long flags;
 	size_t l = 0;
 	bool ret = false;
 
-	prb_rec_init_rd(&r, &info, line, size);
+	if (iter->cur_seq < min_seq)
+		iter->cur_seq = min_seq;
 
-	if (!dumper->active)
-		goto out;
+	printk_safe_enter_irqsave(flags);
+	prb_rec_init_rd(&r, &info, line, size);
 
 	/* Read text or count text lines? */
 	if (line) {
-		if (!prb_read_valid(prb, dumper->cur_seq, &r))
+		if (!prb_read_valid(prb, iter->cur_seq, &r))
 			goto out;
 		l = record_print_text(&r, syslog, printk_time);
 	} else {
-		if (!prb_read_valid_info(prb, dumper->cur_seq,
+		if (!prb_read_valid_info(prb, iter->cur_seq,
 					 &info, &line_count)) {
 			goto out;
 		}
@@ -3358,52 +3411,23 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
 	}
 
-	dumper->cur_seq = r.info->seq + 1;
+	iter->cur_seq = r.info->seq + 1;
 	ret = true;
 out:
+	printk_safe_exit_irqrestore(flags);
 	if (len)
 		*len = l;
 	return ret;
 }
-
-/**
- * kmsg_dump_get_line - retrieve one kmsg log line
- * @dumper: registered kmsg dumper
- * @syslog: include the "<4>" prefixes
- * @line: buffer to copy the line to
- * @size: maximum size of the buffer
- * @len: length of line placed into buffer
- *
- * Start at the beginning of the kmsg buffer, with the oldest kmsg
- * record, and copy one record into the provided buffer.
- *
- * Consecutive calls will return the next available record moving
- * towards the end of the buffer with the youngest messages.
- *
- * A return value of FALSE indicates that there are no more records to
- * read.
- */
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
-			char *line, size_t size, size_t *len)
-{
-	unsigned long flags;
-	bool ret;
-
-	logbuf_lock_irqsave(flags);
-	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
-	logbuf_unlock_irqrestore(flags);
-
-	return ret;
-}
 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
 
 /**
 * kmsg_dump_get_buffer - copy kmsg log lines
- * @dumper: registered kmsg dumper
+ * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the line to
 * @size: maximum size of the buffer
- * @len: length of line placed into buffer
+ * @len_out: length of line placed into buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records that fit into it.
@@ -3416,115 +3440,93 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
-			  char *buf, size_t size, size_t *len)
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+			  char *buf, size_t size, size_t *len_out)
 {
+	u64 min_seq = latched_seq_read_nolock(&clear_seq);
 	struct printk_info info;
-	unsigned int line_count;
 	struct printk_record r;
 	unsigned long flags;
 	u64 seq;
 	u64 next_seq;
-	size_t l = 0;
+	size_t len = 0;
 	bool ret = false;
 	bool time = printk_time;
 
-	prb_rec_init_rd(&r, &info, buf, size);
-
-	if (!dumper->active || !buf || !size)
+	if (!buf || !size)
 		goto out;
 
-	logbuf_lock_irqsave(flags);
-	if (prb_read_valid_info(prb, dumper->cur_seq, &info, NULL)) {
-		if (info.seq != dumper->cur_seq) {
+	if (iter->cur_seq < min_seq)
+		iter->cur_seq = min_seq;
+
+	printk_safe_enter_irqsave(flags);
+	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
+		if (info.seq != iter->cur_seq) {
 			/* messages are gone, move to first available one */
-			dumper->cur_seq = info.seq;
+			iter->cur_seq = info.seq;
 		}
 	}
 
 	/* last entry */
-	if (dumper->cur_seq >= dumper->next_seq) {
-		logbuf_unlock_irqrestore(flags);
+	if (iter->cur_seq >= iter->next_seq) {
+		printk_safe_exit_irqrestore(flags);
 		goto out;
 	}
 
-	/* calculate length of entire buffer */
-	seq = dumper->cur_seq;
-	while (prb_read_valid_info(prb, seq, &info, &line_count)) {
-		if (r.info->seq >= dumper->next_seq)
-			break;
-		l += get_record_print_text_size(&info, line_count, syslog, time);
-		seq = r.info->seq + 1;
-	}
-
-	/* move first record forward until length fits into the buffer */
-	seq = dumper->cur_seq;
-	while (l >= size && prb_read_valid_info(prb, seq, &info, &line_count)) {
-		if (r.info->seq >= dumper->next_seq)
-			break;
-		l -= get_record_print_text_size(&info, line_count, syslog, time);
-		seq = r.info->seq + 1;
-	}
+	/*
+	 * Find first record that fits, including all following records,
+	 * into the user-provided buffer for this dump. Pass in size-1
+	 * because this function (by way of record_print_text()) will
+	 * not write more than size-1 bytes of text into @buf.
+	 */
+	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
				     size - 1, syslog, time);
 
-	/* last message in next interation */
+	/*
+	 * Next kmsg_dump_get_buffer() invocation will dump block of
+	 * older records stored right before this one.
+	 */
 	next_seq = seq;
 
-	/* actually read text into the buffer now */
-	l = 0;
-	while (prb_read_valid(prb, seq, &r)) {
-		if (r.info->seq >= dumper->next_seq)
+	prb_rec_init_rd(&r, &info, buf, size);
+
+	len = 0;
+	prb_for_each_record(seq, prb, seq, &r) {
+		if (r.info->seq >= iter->next_seq)
 			break;
 
-		l += record_print_text(&r, syslog, time);
-
-		/* adjust record to store to remaining buffer space */
-		prb_rec_init_rd(&r, &info, buf + l, size - l);
+		len += record_print_text(&r, syslog, time);
 
-		seq = r.info->seq + 1;
+		/* Adjust record to store to remaining buffer space. */
+		prb_rec_init_rd(&r, &info, buf + len, size - len);
 	}
 
-	dumper->next_seq = next_seq;
+	iter->next_seq = next_seq;
 	ret = true;
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_exit_irqrestore(flags);
out:
-	if (len)
-		*len = l;
+	if (len_out)
+		*len_out = len;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
 
 /**
- * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
- * @dumper: registered kmsg dumper
- *
- * Reset the dumper's iterator so that kmsg_dump_get_line() and
- * kmsg_dump_get_buffer() can be called again and used multiple
- * times within the same dumper.dump() callback.
- *
- * The function is similar to kmsg_dump_rewind(), but grabs no locks.
- */
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-	dumper->cur_seq = clear_seq;
-	dumper->next_seq = prb_next_seq(prb);
-}
-
-/**
 * kmsg_dump_rewind - reset the iterator
- * @dumper: registered kmsg dumper
+ * @iter: kmsg dump iterator
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
-void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 {
 	unsigned long flags;
 
-	logbuf_lock_irqsave(flags);
-	kmsg_dump_rewind_nolock(dumper);
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_enter_irqsave(flags);
+	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
+	iter->next_seq = prb_next_seq(prb);
+	printk_safe_exit_irqrestore(flags);
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
kernel/printk/printk_safe.c
@@ -16,7 +16,7 @@
 #include "internal.h"
 
 /*
- * printk() could not take logbuf_lock in NMI context. Instead,
+ * In NMI and safe mode, printk() avoids taking locks. Instead,
 * it uses an alternative implementation that temporary stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
@@ -267,17 +267,9 @@ void printk_safe_flush(void)
 void printk_safe_flush_on_panic(void)
 {
 	/*
-	 * Make sure that we could access the main ring buffer.
+	 * Make sure that we could access the safe buffers.
	 * Do not risk a double release when more CPUs are up.
	 */
-	if (raw_spin_is_locked(&logbuf_lock)) {
-		if (num_online_cpus() > 1)
-			return;
-
-		debug_locks_off();
-		raw_spin_lock_init(&logbuf_lock);
-	}
-
 	if (raw_spin_is_locked(&safe_read_lock)) {
 		if (num_online_cpus() > 1)
 			return;
@@ -319,9 +311,7 @@ void noinstr printk_nmi_exit(void)
 * reordering.
 *
 * It has effect only when called in NMI context. Then printk()
- * will try to store the messages into the main logbuf directly
- * and use the per-CPU buffers only as a fallback when the lock
- * is not available.
+ * will store the messages into the main logbuf directly.
 */
 void printk_nmi_direct_enter(void)
 {
@@ -367,7 +357,7 @@ void __printk_safe_exit(void)
 	this_cpu_dec(printk_context);
 }
 
-__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+asmlinkage int vprintk(const char *fmt, va_list args)
 {
 #ifdef CONFIG_KGDB_KDB
 	/* Allow to pass printk() to kdb but avoid a recursion. */
@@ -376,20 +366,21 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 #endif
 
 	/*
-	 * Try to use the main logbuf even in NMI. But avoid calling console
+	 * Use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
-	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
-	    raw_spin_trylock(&logbuf_lock)) {
+	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
+		unsigned long flags;
 		int len;
 
+		printk_safe_enter_irqsave(flags);
 		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
-		raw_spin_unlock(&logbuf_lock);
+		printk_safe_exit_irqrestore(flags);
 		defer_console_output();
 		return len;
 	}
 
-	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
+	/* Use extra buffer in NMI. */
 	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 		return vprintk_nmi(fmt, args);
@@ -420,3 +411,4 @@ void __init printk_safe_init(void)
 	/* Flush pending messages that did not have scheduled IRQ works. */
 	printk_safe_flush();
 }
+EXPORT_SYMBOL(vprintk);
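Note (illustrative aside): after this merge printk() is a thin va_list wrapper around vprintk(), which now lives in printk_safe.c, is exported, and chooses the storage path from the per-CPU printk_context flags. A simplified sketch of that dispatch (the real function also handles kdb and defers console output; the helper name here is hypothetical):

static int vprintk_dispatch(const char *fmt, va_list args)
{
	unsigned int ctx = this_cpu_read(printk_context);

	if (ctx & PRINTK_NMI_DIRECT_CONTEXT_MASK)
		return vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);	/* straight into the ring buffer */
	if (ctx & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);		/* per-CPU NMI buffer */
	if (ctx & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);		/* per-CPU safe buffer */
	return vprintk_default(fmt, args);		/* normal path */
}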