Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
cloud-kernel
提交
35f88e6b
cloud-kernel
项目概览
openanolis
/
cloud-kernel
大约 1 年前同步成功
通知
158
Star
36
Fork
7
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
10
列表
看板
标记
里程碑
合并请求
2
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
cloud-kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
10
Issue
10
列表
看板
标记
里程碑
合并请求
2
合并请求
2
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
35f88e6b
编写于
2月 23, 2009
作者:
B
Benjamin Herrenschmidt
浏览文件
操作
浏览文件
下载
差异文件
Merge commit 'ftrace/function-graph' into next
上级
3b7faeb4
712406a6
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
101 additions
and
99 deletions
+101
-99
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/ftrace.h
+0
-25
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack.c
+1
-0
arch/x86/kernel/ftrace.c
arch/x86/kernel/ftrace.c
+1
-74
include/linux/ftrace.h
include/linux/ftrace.h
+24
-0
kernel/trace/trace_functions_graph.c
kernel/trace/trace_functions_graph.c
+75
-0
未找到文件。
arch/x86/include/asm/ftrace.h
浏览文件 @
35f88e6b
...
...
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
#endif
/* __ASSEMBLY__ */
#endif
/* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef __ASSEMBLY__
/*
* Stack of return addresses for functions
* of a thread.
* Used in struct thread_info
*/
struct
ftrace_ret_stack
{
unsigned
long
ret
;
unsigned
long
func
;
unsigned
long
long
calltime
;
};
/*
* Primary handler of a function return.
* It relays on ftrace_return_to_handler.
* Defined in entry_32/64.S
*/
extern
void
return_to_handler
(
void
);
#endif
/* __ASSEMBLY__ */
#endif
/* CONFIG_FUNCTION_GRAPH_TRACER */
#endif
/* _ASM_X86_FTRACE_H */
arch/x86/kernel/dumpstack.c
浏览文件 @
35f88e6b
...
...
@@ -10,6 +10,7 @@
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
...
...
arch/x86/kernel/ftrace.c
浏览文件 @
35f88e6b
...
...
@@ -389,79 +389,6 @@ void ftrace_nmi_exit(void)
#endif
/* !CONFIG_DYNAMIC_FTRACE */
/* Add a function return address to the trace stack on thread info.*/
static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func, int *depth)
{
	int index;

	/* ret_stack is allocated elsewhere; no stack means tracing is not set up */
	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	/*
	 * NOTE(review): barrier() keeps the index update and the slot stores
	 * in program order — ordering here looks intentional, do not reorder.
	 */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	/* report the new stack depth back to the caller */
	*depth = index;

	return 0;
}
/* Retrieve a function return address from the trace stack on thread info.*/
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	/* negative index: pop on an empty stack — the tracer state is corrupt */
	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

	/* hand the saved return address back and fill in the trace record */
	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	/*
	 * NOTE(review): barrier() orders the reads above against the pop
	 * below — ordering here looks intentional, do not reorder.
	 */
	barrier();
	current->curr_ret_stack--;
}
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
unsigned
long
ftrace_return_to_handler
(
void
)
{
struct
ftrace_graph_ret
trace
;
unsigned
long
ret
;
pop_return_trace
(
&
trace
,
&
ret
);
trace
.
rettime
=
cpu_clock
(
raw_smp_processor_id
());
ftrace_graph_return
(
&
trace
);
if
(
unlikely
(
!
ret
))
{
ftrace_graph_stop
();
WARN_ON
(
1
);
/* Might as well panic. What else to do? */
ret
=
(
unsigned
long
)
panic
;
}
return
ret
;
}
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
...
...
@@ -521,7 +448,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
calltime
=
cpu_clock
(
raw_smp_processor_id
());
if
(
push_return_trace
(
old
,
calltime
,
if
(
ftrace_
push_return_trace
(
old
,
calltime
,
self_addr
,
&
trace
.
depth
)
==
-
EBUSY
)
{
*
parent
=
old
;
return
;
...
...
include/linux/ftrace.h
浏览文件 @
35f88e6b
...
...
@@ -379,6 +379,30 @@ struct ftrace_graph_ret {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Stack of return addresses for functions
* of a thread.
* Used in struct thread_info
*/
struct
ftrace_ret_stack
{
unsigned
long
ret
;
unsigned
long
func
;
unsigned
long
long
calltime
;
};
/*
* Primary handler of a function return.
* It relays on ftrace_return_to_handler.
* Defined in entry_32/64.S
*/
extern
void
return_to_handler
(
void
);
extern
int
ftrace_push_return_trace
(
unsigned
long
ret
,
unsigned
long
long
time
,
unsigned
long
func
,
int
*
depth
);
extern
void
ftrace_pop_return_trace
(
struct
ftrace_graph_ret
*
trace
,
unsigned
long
*
ret
);
/*
* Sometimes we don't want to trace a function with the function
* graph tracer but we want them to keep traced by the usual function
...
...
kernel/trace/trace_functions_graph.c
浏览文件 @
35f88e6b
...
...
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
/* pid on the last trace processed */
/* [0 ... N-1] range initializer (GNU extension): every CPU starts at -1, i.e. "no pid seen yet" */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
/* Add a function return address to the trace stack on thread info.*/
int ftrace_push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func, int *depth)
{
	int index;

	/* ret_stack is allocated elsewhere; no stack means tracing is not set up */
	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	/*
	 * NOTE(review): barrier() keeps the index update and the slot stores
	 * in program order — ordering here looks intentional, do not reorder.
	 */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	/* report the new stack depth back to the caller */
	*depth = index;

	return 0;
}
/* Retrieve a function return address from the trace stack on thread info.*/
void ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	/* negative index: pop on an empty stack — the tracer state is corrupt */
	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

	/* hand the saved return address back and fill in the trace record */
	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	/*
	 * NOTE(review): barrier() orders the reads above against the pop
	 * below — ordering here looks intentional, do not reorder.
	 */
	barrier();
	current->curr_ret_stack--;
}
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
unsigned
long
ftrace_return_to_handler
(
void
)
{
struct
ftrace_graph_ret
trace
;
unsigned
long
ret
;
ftrace_pop_return_trace
(
&
trace
,
&
ret
);
trace
.
rettime
=
cpu_clock
(
raw_smp_processor_id
());
ftrace_graph_return
(
&
trace
);
if
(
unlikely
(
!
ret
))
{
ftrace_graph_stop
();
WARN_ON
(
1
);
/* Might as well panic. What else to do? */
ret
=
(
unsigned
long
)
panic
;
}
return
ret
;
}
static
int
graph_trace_init
(
struct
trace_array
*
tr
)
{
int
cpu
,
ret
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录