openeuler / Kernel

Commit 910e94dd
Authored on Oct 12, 2011 by Ingo Molnar

    Merge branch 'tip/perf/core' of git://github.com/rostedt/linux into perf/core

Parents: 177e2163, d696b58c
Showing 17 changed files with 928 additions and 479 deletions (+928, -479).
arch/x86/kernel/jump_label.c               +1    -1
include/linux/module.h                     +0   -12
include/linux/ring_buffer.h                +2    -0
include/linux/trace_clock.h                +1    -0
include/linux/tracepoint.h                +11   -14
kernel/module.c                            +0   -47
kernel/trace/Makefile                      +2    -0
kernel/trace/ftrace.c                      +8    -0
kernel/trace/ring_buffer.c                +69    -1
kernel/trace/trace.c                     +145   -28
kernel/trace/trace.h                       +6   -10
kernel/trace/trace_clock.c                +12    -0
kernel/trace/trace_events_filter.c       +462  -333
kernel/trace/trace_events_filter_test.h   +50    -0
kernel/trace/trace_irqsoff.c               +2    -2
kernel/trace/trace_printk.c               +10    -9
kernel/tracepoint.c                      +147   -22
arch/x86/kernel/jump_label.c

@@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	put_online_cpus();
 }
 
-void arch_jump_label_text_poke_early(jump_label_t addr)
+void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
 {
 	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
 			JUMP_LABEL_NOP_SIZE);
include/linux/module.h

@@ -580,9 +580,6 @@ int unregister_module_notifier(struct notifier_block * nb);
 
 extern void print_modules(void);
 
-extern void module_update_tracepoints(void);
-extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
-
 #else /* !CONFIG_MODULES... */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
@@ -698,15 +695,6 @@ static inline int unregister_module_notifier(struct notifier_block * nb)
 static inline void print_modules(void)
 {
 }
-
-static inline void module_update_tracepoints(void)
-{
-}
-
-static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-	return 0;
-}
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_SYSFS
include/linux/ring_buffer.h

@@ -154,6 +154,8 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
include/linux/trace_clock.h

@@ -15,5 +15,6 @@
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
+extern u64 notrace trace_clock_counter(void);
 
 #endif /* _LINUX_TRACE_CLOCK_H */
include/linux/tracepoint.h

@@ -54,8 +54,18 @@ extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
 		void *data);
 extern void tracepoint_probe_update_all(void);
 
+#ifdef CONFIG_MODULES
+struct tp_module {
+	struct list_head list;
+	unsigned int num_tracepoints;
+	struct tracepoint * const *tracepoints_ptrs;
+};
+#endif /* CONFIG_MODULES */
+
 struct tracepoint_iter {
-	struct module *module;
+#ifdef CONFIG_MODULES
+	struct tp_module *module;
+#endif /* CONFIG_MODULES */
 	struct tracepoint * const *tracepoint;
 };
@@ -63,8 +73,6 @@ extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
-	struct tracepoint * const *begin, struct tracepoint * const *end);
 
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -78,17 +86,6 @@ static inline void tracepoint_synchronize_unregister(void)
 #define PARAMS(args...) args
 
-#ifdef CONFIG_TRACEPOINTS
-extern void tracepoint_update_probe_range(struct tracepoint * const *begin,
-	struct tracepoint * const *end);
-#else
-static inline
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-	struct tracepoint * const *end)
-{ }
-#endif /* CONFIG_TRACEPOINTS */
-
 #endif /* _LINUX_TRACEPOINT_H */
 
 /*
kernel/module.c

@@ -3487,50 +3487,3 @@ void module_layout(struct module *mod,
 }
 EXPORT_SYMBOL(module_layout);
 #endif
-
-#ifdef CONFIG_TRACEPOINTS
-void module_update_tracepoints(void)
-{
-	struct module *mod;
-
-	mutex_lock(&module_mutex);
-	list_for_each_entry(mod, &modules, list)
-		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints_ptrs,
-				mod->tracepoints_ptrs + mod->num_tracepoints);
-	mutex_unlock(&module_mutex);
-}
-
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- */
-int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-	struct module *iter_mod;
-	int found = 0;
-
-	mutex_lock(&module_mutex);
-	list_for_each_entry(iter_mod, &modules, list) {
-		if (!iter_mod->taints) {
-			/*
-			 * Sorted module list
-			 */
-			if (iter_mod < iter->module)
-				continue;
-			else if (iter_mod > iter->module)
-				iter->tracepoint = NULL;
-			found = tracepoint_get_iter_range(&iter->tracepoint,
-				iter_mod->tracepoints_ptrs,
-				iter_mod->tracepoints_ptrs
-					+ iter_mod->num_tracepoints);
-			if (found) {
-				iter->module = iter_mod;
-				break;
-			}
-		}
-	}
-	mutex_unlock(&module_mutex);
-	return found;
-}
-#endif
kernel/trace/Makefile

@@ -15,6 +15,8 @@ ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif
 
+CFLAGS_trace_events_filter.o := -I$(src)
+
 #
 # Make the trace clocks available generally: it's infrastructure
 # relied on by ptrace for example:
kernel/trace/ftrace.c

@@ -3862,6 +3862,14 @@ void ftrace_kill(void)
 	clear_ftrace_function();
 }
 
+/**
+ * Test if ftrace is dead or not.
+ */
+int ftrace_is_dead(void)
+{
+	return ftrace_disabled;
+}
+
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
kernel/trace/ring_buffer.c

@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*reader_page;
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
+	local_t				entries_bytes;
 	local_t				commit_overrun;
 	local_t				overrun;
 	local_t				entries;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
+	unsigned long			read_bytes;
 	u64				write_stamp;
 	u64				read_stamp;
 };
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 			 * the counters.
 			 */
 			local_add(entries, &cpu_buffer->overrun);
+			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
 			/*
 			 * The entries will be zeroed out when we move the
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
 
+	/* account for padding bytes */
+	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
 	/*
 	 * Save the original length to the meta data.
 	 * This will be used by the reader to add lost event
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (!tail)
 		tail_page->page->time_stamp = ts;
 
+	/* account for these added bytes */
+	local_add(length, &cpu_buffer->entries_bytes);
+
 	return event;
 }
@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		old_index += write_mask;
 		new_index += write_mask;
 		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
+		if (index == old_index) {
+			/* update counters */
+			local_sub(event_length, &cpu_buffer->entries_bytes);
 			return 1;
+		}
 	}
 
 	/* could not discard */
@@ -2660,6 +2673,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
 }
 
+/**
+ * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+{
+	unsigned long flags;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *bpage;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	/*
+	 * if the tail is on reader_page, oldest time stamp is on the reader
+	 * page
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		bpage = cpu_buffer->reader_page;
+	else
+		bpage = rb_set_head_page(cpu_buffer);
+	ret = bpage->page->time_stamp;
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+/**
+ * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->read = 0;
 
 	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	cpu_buffer->read = 0;
+	cpu_buffer->read_bytes = 0;
 
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	} else {
 		/* update the entry counter */
 		cpu_buffer->read += rb_page_entries(reader);
+		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
 
 		/* swap the pages */
 		rb_init_page(bpage);
kernel/trace/trace.c

@@ -435,6 +435,7 @@ static struct {
 } trace_clocks[] = {
 	{ trace_clock_local,	"local" },
 	{ trace_clock_global,	"global" },
+	{ trace_clock_counter,	"counter" },
 };
 
 int trace_clock_id;
@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m)
 	}
 }
 
+static void test_ftrace_alive(struct seq_file *m)
+{
+	if (!ftrace_is_dead())
+		return;
+	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v)
 	if (iter->tr) {
 		seq_printf(m, "# tracer: %s\n", iter->trace->name);
 		seq_puts(m, "#\n");
+		test_ftrace_alive(m);
 	}
 	if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
@@ -3568,6 +3578,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static ssize_t
+tracing_total_entries_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	char buf[64];
+	int r, cpu;
+	unsigned long size = 0, expanded_size = 0;
+
+	mutex_lock(&trace_types_lock);
+	for_each_tracing_cpu(cpu) {
+		size += tr->entries >> 10;
+		if (!ring_buffer_expanded)
+			expanded_size += trace_buf_size >> 10;
+	}
+	if (ring_buffer_expanded)
+		r = sprintf(buf, "%lu\n", size);
+	else
+		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
+	mutex_unlock(&trace_types_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
 static ssize_t
 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 			  size_t cnt, loff_t *ppos)
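For context (not part of the commit): this function backs a read-only debugfs file, registered as buffer_total_size_kb with mode 0444 later in this diff. A minimal userspace sketch that reads it, assuming debugfs is mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/debug/tracing/buffer_total_size_kb", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Prints e.g. "1408", or "20 (expanded: 1408)" while the ring
	 * buffer has not yet been expanded to its full size. */
	if (fgets(line, sizeof(line), f))
		printf("total buffer size (kb): %s", line);
	fclose(f);
	return 0;
}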
@@ -3594,22 +3628,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	char *buf;
-	size_t written;
+	unsigned long addr = (unsigned long)ubuf;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+	struct page *pages[2];
+	int nr_pages = 1;
+	ssize_t written;
+	void *page1;
+	void *page2;
+	int offset;
+	int size;
+	int len;
+	int ret;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3617,28 +3653,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 2, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	/*
+	 * Userspace is injecting traces into the kernel trace buffer.
+	 * We want to be as non intrusive as possible.
+	 * To do so, we do not want to allocate any special buffers
+	 * or take any locks, but instead write the userspace data
+	 * straight into the ring buffer.
+	 *
+	 * First we need to pin the userspace buffer into memory,
+	 * which, most likely it is, because it just referenced it.
+	 * But there's no guarantee that it is. By using get_user_pages_fast()
+	 * and kmap_atomic/kunmap_atomic() we can get access to the
+	 * pages directly. We then write the data directly into the
+	 * ring buffer.
+	 */
+	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-	if (copy_from_user(buf, ubuf, cnt)) {
-		kfree(buf);
-		return -EFAULT;
+	/* check if we cross pages */
+	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+		nr_pages = 2;
+
+	offset = addr & (PAGE_SIZE - 1);
+	addr &= PAGE_MASK;
+
+	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+	if (ret < nr_pages) {
+		while (--ret >= 0)
+			put_page(pages[ret]);
+		written = -EFAULT;
+		goto out;
 	}
-	if (buf[cnt-1] != '\n') {
-		buf[cnt] = '\n';
-		buf[cnt+1] = '\0';
+
+	page1 = kmap_atomic(pages[0]);
+	if (nr_pages == 2)
+		page2 = kmap_atomic(pages[1]);
+
+	local_save_flags(irq_flags);
+	size = sizeof(*entry) + cnt + 2; /* possible \n added */
+	buffer = global_trace.buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, preempt_count());
+	if (!event) {
+		/* Ring buffer disabled, return as if not open for write */
+		written = -EBADF;
+		goto out_unlock;
 	}
-	else
-		buf[cnt] = '\0';
 
-	written = mark_printk("%s", buf);
-	kfree(buf);
-	*fpos += written;
+	entry = ring_buffer_event_data(event);
+	entry->ip = _THIS_IP_;
+
+	if (nr_pages == 2) {
+		len = PAGE_SIZE - offset;
+		memcpy(&entry->buf, page1 + offset, len);
+		memcpy(&entry->buf[len], page2, cnt - len);
+	} else
+		memcpy(&entry->buf, page1 + offset, cnt);
 
-	/* don't tell userspace we wrote more - it might confuse them */
-	if (written > cnt)
-		written = cnt;
+	if (entry->buf[cnt - 1] != '\n') {
+		entry->buf[cnt] = '\n';
+		entry->buf[cnt + 1] = '\0';
+	} else
+		entry->buf[cnt] = '\0';
+
+	ring_buffer_unlock_commit(buffer, event);
 
+	written = cnt;
+
+	*fpos += written;
+
+ out_unlock:
+	if (nr_pages == 2)
+		kunmap_atomic(page2);
+	kunmap_atomic(page1);
+	while (nr_pages > 0)
+		put_page(pages[--nr_pages]);
+ out:
+	return written;
 }
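A short userspace sketch (not from the commit) exercising the rewritten path: each write() to trace_marker now goes straight into the ring buffer through the pinned user pages, including the case where the message straddles a page boundary. The debugfs mount point is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello from userspace";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("open trace_marker");
		return 1;
	}
	/* One write() becomes one TRACE_PRINT entry; if msg happened to
	 * cross a page boundary, the kernel now stitches the two pinned
	 * pages together instead of bouncing through a kmalloc'd buffer. */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}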
@@ -3739,6 +3828,12 @@ static const struct file_operations tracing_entries_fops = {
 	.llseek		= generic_file_llseek,
 };
 
+static const struct file_operations tracing_total_entries_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_total_entries_read,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_free_buffer_fops = {
 	.write		= tracing_free_buffer_write,
 	.release	= tracing_free_buffer_release,
@@ -4026,6 +4121,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	struct trace_array *tr = &global_trace;
 	struct trace_seq *s;
 	unsigned long cnt;
+	unsigned long long t;
+	unsigned long usec_rem;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -4042,6 +4139,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 
+	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "bytes: %ld\n", cnt);
+
+	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+
+	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 	kfree(s);
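As a quick check of the new fields, a userspace sketch (paths assumed, not part of the commit) that dumps a per-CPU stats file; with this change the output gains "bytes:", "oldest event ts:" and "now ts:" lines alongside the existing entry and overrun counters:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Echo each counter line, e.g. "bytes: 392" and
	 * "oldest event ts:  2440.603828". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}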
@@ -4450,6 +4558,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
 			&global_trace, &tracing_entries_fops);
 
+	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+			&global_trace, &tracing_total_entries_fops);
+
 	trace_create_file("free_buffer", 0644, d_tracer,
 			&global_trace, &tracing_free_buffer_fops);
@@ -4566,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	tracing_off();
 
+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
 	if (disable_tracing)
 		ftrace_kill();
kernel/trace/trace.h

@@ -579,11 +579,13 @@ static inline int ftrace_trace_task(struct task_struct *task)
 	return test_tsk_trace_trace(task);
 }
+extern int ftrace_is_dead(void);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
+static inline int ftrace_is_dead(void) { return 0; }
 #endif
 
 /*
@@ -761,16 +763,10 @@ struct filter_pred {
 	filter_pred_fn_t 	fn;
 	u64 			val;
 	struct regex		regex;
-	/*
-	 * Leaf nodes use field_name, ops is used by AND and OR
-	 * nodes. The field_name is always freed when freeing a pred.
-	 * We can overload field_name for ops and have it freed
-	 * as well.
-	 */
-	union {
-		char		*field_name;
-		unsigned short	*ops;
-	};
+	unsigned short		*ops;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	struct ftrace_event_field *field;
+#endif
 	int 			offset;
 	int 			not;
 	int 			op;
kernel/trace/trace_clock.c

@@ -113,3 +113,15 @@ u64 notrace trace_clock_global(void)
 	return now;
 }
+
+static atomic64_t trace_counter;
+
+/*
+ * trace_clock_counter(): simply an atomic counter.
+ * Use the trace_counter "counter" for cases where you do not care
+ * about timings, but are interested in strict ordering.
+ */
+u64 notrace trace_clock_counter(void)
+{
+	return atomic64_add_return(1, &trace_counter);
+}
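The new clock becomes selectable through the existing trace_clock debugfs file once the trace_clocks[] entry added earlier in this diff is in place. A hedged userspace sketch, assuming the usual debugfs mount point:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_RDWR);

	if (fd < 0) {
		perror("open trace_clock");
		return 1;
	}
	/* Select the new clock: events are then stamped with a strictly
	 * increasing counter instead of a timestamp, which is useful when
	 * only ordering matters. */
	if (write(fd, "counter", 7) < 0)
		perror("write");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);	/* e.g. "local global [counter]" */
	}
	close(fd);
	return 0;
}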
kernel/trace/trace_events_filter.c

(This diff (+462, -333) was collapsed on the original page and is not reproduced here.)
kernel/trace/trace_events_filter_test.h (new file, mode 100644)

+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM test
+
+#if !defined(_TRACE_TEST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TEST_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ftrace_test_filter,
+
+	TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
+
+	TP_ARGS(a, b, c, d, e, f, g, h),
+
+	TP_STRUCT__entry(
+		__field(int, a)
+		__field(int, b)
+		__field(int, c)
+		__field(int, d)
+		__field(int, e)
+		__field(int, f)
+		__field(int, g)
+		__field(int, h)
+	),
+
+	TP_fast_assign(
+		__entry->a = a;
+		__entry->b = b;
+		__entry->c = c;
+		__entry->d = d;
+		__entry->e = e;
+		__entry->f = f;
+		__entry->g = g;
+		__entry->h = h;
+	),
+
+	TP_printk("a %d, b %d, c %d, d %d, e %d, f %d, g %d, h %d",
+		  __entry->a, __entry->b, __entry->c, __entry->d,
+		  __entry->e, __entry->f, __entry->g, __entry->h)
+);
+
+#endif /* _TRACE_TEST_H || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_events_filter_test
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
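This header exists only to feed the filter startup self-test; the same filter engine is driven from userspace through each event's filter file. A small sketch (the event and expression are illustrative, not from the commit; paths assume debugfs at /sys/kernel/debug):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* sched_switch stands in for the ftrace_test_filter event above,
	 * which is compiled in solely for the self-test. */
	const char expr[] = "prev_prio < 100 && next_pid != 0";
	int fd = open("/sys/kernel/debug/tracing/events/sched/sched_switch/filter",
		      O_WRONLY);

	if (fd < 0) {
		perror("open filter");
		return 1;
	}
	if (write(fd, expr, strlen(expr)) < 0)
		perror("write");	/* fails with EINVAL on a parse error */
	close(fd);
	return 0;
}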
kernel/trace/trace_irqsoff.c

@@ -505,13 +505,13 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
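In effect, preempt-off transitions that happen while interrupts are already disabled are left to the irqsoff timing rather than also being charged to the preemptoff tracer. A userspace sketch to enable the affected tracer and observe the result (paths assumed, not part of the commit):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Maximum preempt-off latencies then accumulate in
	 * tracing_max_latency and the trace itself. */
	if (write_str("/sys/kernel/debug/tracing/current_tracer", "preemptoff")) {
		perror("enable preemptoff tracer");
		return 1;
	}
	return write_str("/sys/kernel/debug/tracing/tracing_on", "1") ? 1 : 0;
}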
kernel/trace/trace_printk.c

@@ -59,18 +59,19 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 			continue;
 		}
 
+		fmt = NULL;
 		tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
-		if (tb_fmt)
+		if (tb_fmt) {
 			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
-		if (tb_fmt && fmt) {
-			list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
-			strcpy(fmt, *iter);
-			tb_fmt->fmt = fmt;
-			*iter = tb_fmt->fmt;
-		} else {
-			kfree(tb_fmt);
-			*iter = NULL;
+			if (fmt) {
+				list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
+				strcpy(fmt, *iter);
+				tb_fmt->fmt = fmt;
+			} else
+				kfree(tb_fmt);
 		}
+		*iter = fmt;
 	}
 	mutex_unlock(&btrace_mutex);
 }
kernel/tracepoint.c

@@ -34,11 +34,16 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
 static const int tracepoint_debug;
 
 /*
- * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
- * builtin and module tracepoints and the hash table.
+ * Tracepoints mutex protects the builtin and module tracepoints and the hash
+ * table, as well as the local module list.
  */
 static DEFINE_MUTEX(tracepoints_mutex);
 
+#ifdef CONFIG_MODULES
+/* Local list of struct module */
+static LIST_HEAD(tracepoint_module_list);
+#endif /* CONFIG_MODULES */
+
 /*
  * Tracepoint hash table, containing the active tracepoints.
  * Protected by tracepoints_mutex.
@@ -292,9 +297,10 @@ static void disable_tracepoint(struct tracepoint *elem)
  * @end: end of the range
  *
  * Updates the probe callback corresponding to a range of tracepoints.
+ * Called with tracepoints_mutex held.
  */
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-				   struct tracepoint * const *end)
+static void tracepoint_update_probe_range(struct tracepoint * const *begin,
+					  struct tracepoint * const *end)
 {
 	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
@@ -302,7 +308,6 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 	if (!begin)
 		return;
 
-	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
@@ -312,11 +317,27 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 			disable_tracepoint(*iter);
 		}
 	}
-	mutex_unlock(&tracepoints_mutex);
 }
 
+#ifdef CONFIG_MODULES
+void module_update_tracepoints(void)
+{
+	struct tp_module *tp_mod;
+
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
+			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
+}
+#else /* CONFIG_MODULES */
+void module_update_tracepoints(void)
+{
+}
+#endif /* CONFIG_MODULES */
+
 /*
  * Update probes, removing the faulty probes.
+ * Called with tracepoints_mutex held.
  */
 static void tracepoint_update_probes(void)
 {
@@ -359,11 +380,12 @@ int tracepoint_probe_register(const char *name, void *probe, void *data)
 
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_add_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
-
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -402,11 +424,12 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
 
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_remove_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
-
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -489,9 +512,8 @@ void tracepoint_probe_update_all(void)
 	if (!list_empty(&old_probes))
 		list_replace_init(&old_probes, &release_probes);
 	need_update = 0;
-	mutex_unlock(&tracepoints_mutex);
 	tracepoint_update_probes();
+	mutex_unlock(&tracepoints_mutex);
 	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
 		list_del(&pos->u.list);
 		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
@@ -509,7 +531,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
  * Will return the first tracepoint in the range if the input tracepoint is
  * NULL.
  */
-int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
 	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
@@ -520,11 +542,12 @@ int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
 		return 1;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
 
+#ifdef CONFIG_MODULES
 static void tracepoint_get_iter(struct tracepoint_iter *iter)
 {
 	int found = 0;
+	struct tp_module *iter_mod;
 
 	/* Core kernel tracepoints */
 	if (!iter->module) {
@@ -534,12 +557,43 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
 		if (found)
 			goto end;
 	}
-	/* tracepoints in modules. */
-	found = module_get_iter_tracepoints(iter);
+	/* Tracepoints in modules */
+	mutex_lock(&tracepoints_mutex);
+	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
+		/*
+		 * Sorted module list
+		 */
+		if (iter_mod < iter->module)
+			continue;
+		else if (iter_mod > iter->module)
+			iter->tracepoint = NULL;
+		found = tracepoint_get_iter_range(&iter->tracepoint,
+			iter_mod->tracepoints_ptrs,
+			iter_mod->tracepoints_ptrs
+				+ iter_mod->num_tracepoints);
+		if (found) {
+			iter->module = iter_mod;
+			break;
+		}
+	}
+	mutex_unlock(&tracepoints_mutex);
 end:
 	if (!found)
 		tracepoint_iter_reset(iter);
 }
+#else /* CONFIG_MODULES */
+static void tracepoint_get_iter(struct tracepoint_iter *iter)
+{
+	int found = 0;
+
+	/* Core kernel tracepoints */
+	found = tracepoint_get_iter_range(&iter->tracepoint,
+			__start___tracepoints_ptrs,
+			__stop___tracepoints_ptrs);
+	if (!found)
+		tracepoint_iter_reset(iter);
+}
+#endif /* CONFIG_MODULES */
 
 void tracepoint_iter_start(struct tracepoint_iter *iter)
 {
@@ -566,26 +620,98 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
 
 void tracepoint_iter_reset(struct tracepoint_iter *iter)
 {
+#ifdef CONFIG_MODULES
 	iter->module = NULL;
+#endif /* CONFIG_MODULES */
 	iter->tracepoint = NULL;
 }
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+static int tracepoint_module_coming(struct module *mod)
+{
+	struct tp_module *tp_mod, *iter;
+	int ret = 0;
+
+	/*
+	 * We skip modules that tain the kernel, especially those with different
+	 * module header (for forced load), to make sure we don't cause a crash.
+	 */
+	if (mod->taints)
+		return 0;
+	mutex_lock(&tracepoints_mutex);
+	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+	if (!tp_mod) {
+		ret = -ENOMEM;
+		goto end;
+	}
+	tp_mod->num_tracepoints = mod->num_tracepoints;
+	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
+
+	/*
+	 * tracepoint_module_list is kept sorted by struct module pointer
+	 * address for iteration on tracepoints from a seq_file that can release
+	 * the mutex between calls.
+	 */
+	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
+		BUG_ON(iter == tp_mod);	/* Should never be in the list twice */
+		if (iter < tp_mod) {
+			/* We belong to the location right after iter. */
+			list_add(&tp_mod->list, &iter->list);
+			goto module_added;
+		}
+	}
+	/* We belong to the beginning of the list */
+	list_add(&tp_mod->list, &tracepoint_module_list);
+module_added:
+	tracepoint_update_probe_range(mod->tracepoints_ptrs,
+		mod->tracepoints_ptrs + mod->num_tracepoints);
+end:
+	mutex_unlock(&tracepoints_mutex);
+	return ret;
+}
+
+static int tracepoint_module_going(struct module *mod)
+{
+	struct tp_module *pos;
+
+	mutex_lock(&tracepoints_mutex);
+	tracepoint_update_probe_range(mod->tracepoints_ptrs,
+		mod->tracepoints_ptrs + mod->num_tracepoints);
+	list_for_each_entry(pos, &tracepoint_module_list, list) {
+		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
+			list_del(&pos->list);
+			kfree(pos);
+			break;
+		}
+	}
+	/*
+	 * In the case of modules that were tainted at "coming", we'll simply
+	 * walk through the list without finding it. We cannot use the "tainted"
+	 * flag on "going", in case a module taints the kernel only after being
+	 * loaded.
+	 */
+	mutex_unlock(&tracepoints_mutex);
+	return 0;
+}
+
 int tracepoint_module_notify(struct notifier_block *self,
 			     unsigned long val, void *data)
 {
 	struct module *mod = data;
+	int ret = 0;
 
 	switch (val) {
 	case MODULE_STATE_COMING:
-	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints_ptrs,
-			mod->tracepoints_ptrs + mod->num_tracepoints);
+		ret = tracepoint_module_coming(mod);
+		break;
+	case MODULE_STATE_LIVE:
+		break;
+	case MODULE_STATE_GOING:
+		ret = tracepoint_module_going(mod);
 		break;
 	}
-	return 0;
+	return ret;
 }
 
 struct notifier_block tracepoint_module_nb = {
@@ -598,7 +724,6 @@ static int init_tracepoints(void)
 	return register_module_notifier(&tracepoint_module_nb);
 }
 __initcall(init_tracepoints);
-
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
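The coming/going split above follows the standard module-notifier pattern: the notifier's val carries a MODULE_STATE_* value and data points to the struct module in transition. A minimal, self-contained module sketch of that same pattern (illustrative only, not from this commit):

#include <linux/module.h>
#include <linux/notifier.h>

/* Log module load/unload transitions, mirroring how tracepoint.c hooks
 * module state changes to maintain its tp_module list. */
static int demo_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		pr_info("module %s coming\n", mod->name);
		break;
	case MODULE_STATE_GOING:
		pr_info("module %s going\n", mod->name);
		break;
	}
	return 0;
}

static struct notifier_block demo_module_nb = {
	.notifier_call	= demo_module_notify,
	.priority	= 0,
};

static int __init demo_init(void)
{
	return register_module_notifier(&demo_module_nb);
}

static void __exit demo_exit(void)
{
	unregister_module_notifier(&demo_module_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");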