openanolis / cloud-kernel
Commit e4106133
Authored on Mar 17, 2009 by Ingo Molnar

Merge branch 'tracing/syscalls' into tracing/core

Parents: 7243f214, 2fc1dfbe

Showing 4 changed files with 34 additions and 29 deletions (+34, -29)
kernel/trace/Kconfig           +1  -0
kernel/trace/trace.c           +3  -2
kernel/trace/trace_selftest.c  +6  -10
kernel/trace/trace_syscalls.c  +24 -17
kernel/trace/Kconfig
@@ -182,6 +182,7 @@ config FTRACE_SYSCALLS
 	bool "Trace syscalls"
 	depends on HAVE_FTRACE_SYSCALLS
 	select TRACING
+	select KALLSYMS
 	help
 	  Basic tracer to catch the syscall entry and exit events.
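The Kconfig change makes FTRACE_SYSCALLS select KALLSYMS; the syscall tracer's arch setup (arch_init_ftrace_syscalls(), called from start_ftrace_syscalls() further down) appears to match syscall table entries against their symbol names, which requires kallsyms data to be compiled in. As a loose userspace illustration of the name information kallsyms exposes (a hypothetical standalone program, not part of this commit), the sketch below lists sys_* symbols from /proc/kallsyms:

/*
 * Hypothetical userspace sketch, not part of the commit: list the sys_*
 * symbols the kernel exports through kallsyms. It only illustrates the kind
 * of name data the syscall tracer relies on; addresses in /proc/kallsyms may
 * read as 0 without sufficient privileges.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/kallsyms");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned long addr;
		char type, name[128];

		/* each line reads "<address> <type> <symbol> [module]" */
		if (sscanf(line, "%lx %c %127s", &addr, &type, name) == 3 &&
		    strncmp(name, "sys_", 4) == 0)
			printf("%s\n", name);
	}
	fclose(f);
	return 0;
}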
kernel/trace/trace.c
@@ -2494,7 +2494,7 @@ static int tracing_set_tracer(const char *buf)
 	if (!ring_buffer_expanded) {
 		ret = tracing_resize_ring_buffer(trace_buf_size);
 		if (ret < 0)
-			return ret;
+			goto out;
 		ret = 0;
 	}
@@ -4125,7 +4125,8 @@ __init static int tracer_alloc_buffers(void)
 					&trace_panic_notifier);

 	register_die_notifier(&trace_die_notifier);
-	ret = 0;
+
+	return 0;

 out_free_cpumask:
 	free_cpumask_var(tracing_reader_cpumask);
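The two trace.c hunks clean up exit paths: tracing_set_tracer() now jumps to its out label instead of returning early when the ring-buffer resize fails, presumably so the cleanup done at that label (releasing the lock taken earlier in the function) still runs, and tracer_alloc_buffers() returns 0 directly on success instead of setting ret and falling through. Below is a minimal standalone sketch of the same goto-based single-exit pattern; the names (demo_lock, resize_buffer, set_tracer) are illustrative, not kernel APIs:

/*
 * Minimal sketch of the goto-based single-exit pattern: every failure after
 * the lock is taken jumps to "out" so the unlock always runs.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int buffer_expanded;

static int resize_buffer(unsigned long size)
{
	/* pretend the allocation succeeded */
	printf("resized buffer to %lu bytes\n", size);
	return 0;
}

static int set_tracer(const char *name)
{
	int ret = 0;

	pthread_mutex_lock(&demo_lock);

	if (!buffer_expanded) {
		ret = resize_buffer(1 << 20);
		if (ret < 0)
			goto out;	/* not "return ret": the unlock below must run */
		buffer_expanded = 1;
	}

	printf("tracer set to %s\n", name);
out:
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

int main(void)
{
	return set_tracer("syscalls") < 0;
}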
kernel/trace/trace_selftest.c
@@ -414,7 +414,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		goto out;
+		goto out_no_start;
 	}

 	/* reset the max latency */
@@ -432,21 +432,16 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}

 	ret = trace_test_buffer(&max_tr, &count);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}

 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
 		ret = -1;
-		tracing_start();
 		goto out;
 	}
@@ -475,9 +470,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 		goto out;
 	}

- out:
-	trace->reset(tr);
+out:
 	tracing_start();
+out_no_start:
+	trace->reset(tr);
 	tracing_max_latency = save_max;

 	return ret;
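The selftest change drops the per-error-path tracing_start() calls and funnels everything through two labels: out restarts tracing and falls through to out_no_start, which only resets the tracer and is the target for the early tracer_init() failure, where tracing was never stopped. A standalone sketch of that two-label teardown pattern, with illustrative demo_* names rather than the kernel's own:

/*
 * Sketch of the out / out_no_start teardown: error paths after tracing has
 * been stopped jump to "out" (restart, then fall through), the early init
 * failure jumps straight to "out_no_start".
 */
#include <stdio.h>

static void demo_tracing_stop(void)  { puts("tracing stopped"); }
static void demo_tracing_start(void) { puts("tracing started"); }
static void demo_reset(void)         { puts("tracer reset");    }

static int demo_selftest(int fail_init, int fail_check)
{
	int ret = 0;

	if (fail_init) {
		ret = -1;
		goto out_no_start;	/* tracing was never stopped */
	}

	demo_tracing_stop();

	if (fail_check) {
		ret = -1;
		goto out;		/* one restart point instead of one per branch */
	}

out:
	demo_tracing_start();
out_no_start:
	demo_reset();
	return ret;
}

int main(void)
{
	demo_selftest(0, 1);
	demo_selftest(1, 0);
	return 0;
}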
kernel/trace/trace_syscalls.c
@@ -5,9 +5,13 @@
 #include "trace_output.h"
 #include "trace.h"

-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;

-/* Our two options */
+/* Prevent from races on thread flags toggling */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameters types */
 enum {
 	TRACE_SYSCALLS_OPT_TYPES = 0x1,
 };
@@ -18,7 +22,7 @@ static struct tracer_opt syscalls_opts[] = {
 };

 static struct tracer_flags syscalls_flags = {
-	.val = 0, /* By default: no args types */
+	.val = 0, /* By default: no parameters types */
 	.opts = syscalls_opts
 };
@@ -96,8 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;

-	if (atomic_inc_return(&refcount) != 1)
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* Don't enable the flag on the tasks twice */
+	if (++refcount != 1)
+		goto unlock;

 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -107,8 +114,9 @@ void start_ftrace_syscalls(void)
 	}
 	while_each_thread(g, t);

 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_dec(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }

 void stop_ftrace_syscalls(void)
@@ -116,8 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;

-	if (atomic_dec_return(&refcount))
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* There are perhaps still some users */
+	if (--refcount)
+		goto unlock;

 	read_lock_irqsave(&tasklist_lock, flags);
@@ -126,8 +137,9 @@ void stop_ftrace_syscalls(void)
 	}
 	while_each_thread(g, t);

 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_inc(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }

 void ftrace_syscall_enter(struct pt_regs *regs)
@@ -137,12 +149,9 @@ void ftrace_syscall_enter(struct pt_regs *regs)
 	struct ring_buffer_event *event;
 	int size;
 	int syscall_nr;
-	int cpu;

 	syscall_nr = syscall_get_nr(current, regs);
-
-	cpu = raw_smp_processor_id();

 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
@@ -168,12 +177,9 @@ void ftrace_syscall_exit(struct pt_regs *regs)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	int syscall_nr;
-	int cpu;

 	syscall_nr = syscall_get_nr(current, regs);
-
-	cpu = raw_smp_processor_id();

 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
@@ -201,6 +207,7 @@ static int init_syscall_tracer(struct trace_array *tr)
 static void reset_syscall_tracer(struct trace_array *tr)
 {
 	stop_ftrace_syscalls();
+	tracing_reset_online_cpus(tr);
 }

 static struct trace_event syscall_enter_event = {
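The bulk of the trace_syscalls.c change replaces the bare atomic_t refcount with a plain int guarded by syscall_trace_lock, so that checking the user count and walking the task list to toggle the per-task syscall-tracing thread flag happen inside one critical section rather than racing against a concurrent start/stop. A standalone pthread-based sketch of that mutex-guarded "first user enables, last user disables" pattern (demo_* names are illustrative, not kernel APIs):

/*
 * Userspace sketch of a mutex-guarded reference count: only the first user
 * enables the global state and only the last one disables it, and both the
 * count check and the state change are serialized by the same lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;

static void demo_enable_tracing(void)  { puts("tracing enabled");  }
static void demo_disable_tracing(void) { puts("tracing disabled"); }

void demo_start(void)
{
	pthread_mutex_lock(&trace_lock);

	/* only the first user actually flips the global state */
	if (++refcount == 1)
		demo_enable_tracing();

	pthread_mutex_unlock(&trace_lock);
}

void demo_stop(void)
{
	pthread_mutex_lock(&trace_lock);

	/* only the last user tears the state down */
	if (--refcount == 0)
		demo_disable_tracing();

	pthread_mutex_unlock(&trace_lock);
}

int main(void)
{
	demo_start();
	demo_start();	/* second user: no re-enable */
	demo_stop();
	demo_stop();	/* last user: disable */
	return 0;
}

With a bare atomic counter, a racing start and stop could each observe a count transition yet interleave their task-list walks; holding the mutex across both the check and the flag updates removes that window.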