Commit 0cc4bd8f
Authored Jan 28, 2020 by Ingo Molnar

Merge branch 'core/kprobes' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 56ee04aa 31537cf8

Showing 2 changed files with 45 additions and 25 deletions (+45 -25):

kernel/kprobes.c                +43 -24
kernel/trace/trace_syscalls.c    +2  -1
kernel/kprobes.c

@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
+	if (!kprobe_optimized(p))
 		return;
-	}
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
+
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}
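The kernel/kprobes.c hunks above all build on one observation: a probe's op->list linkage looks identical whether the probe is queued on optimizing_list or on unoptimizing_list, so the new optprobe_queued_unopt() helper walks the unoptimizing queue to tell the two apart before optimize_kprobe()/unoptimize_kprobe() decide whether to dequeue, re-queue, or force-unoptimize. Below is a small standalone userspace sketch of that membership-by-walking pattern; it is an illustrative analogue only, and fake_probe, queued_on and the two queue variables are invented names, not the kernel API.

/*
 * Standalone userspace analogue (not kernel code) of the pattern behind
 * optprobe_queued_unopt(): the same linkage field can put a probe on either
 * of two pending queues, so "which queue am I on?" is answered by walking
 * the unoptimizing queue and comparing pointers.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_probe {
	const char *name;
	struct fake_probe *next;	/* linkage on whichever queue holds it */
};

static struct fake_probe *optimizing_q;		/* waiting to be optimized */
static struct fake_probe *unoptimizing_q;	/* waiting to be unoptimized */

/* Push a probe onto the head of a queue. */
static void enqueue(struct fake_probe **q, struct fake_probe *p)
{
	p->next = *q;
	*q = p;
}

/* Walk one queue and report whether this probe is linked on it. */
static bool queued_on(const struct fake_probe *q, const struct fake_probe *p)
{
	for (const struct fake_probe *it = q; it; it = it->next)
		if (it == p)
			return true;
	return false;
}

int main(void)
{
	struct fake_probe a = { .name = "a" }, b = { .name = "b" };

	enqueue(&optimizing_q, &a);
	enqueue(&unoptimizing_q, &b);

	/* Mirrors the optprobe_queued_unopt() decision: same linkage, two queues. */
	printf("a queued for unoptimizing: %d\n", queued_on(unoptimizing_q, &a)); /* 0 */
	printf("b queued for unoptimizing: %d\n", queued_on(unoptimizing_q, &b)); /* 1 */
	return 0;
}

In the kernel, both queues are manipulated under kprobe_mutex, which is what makes a plain walk of the (short) unoptimizing list sufficient here.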
kernel/trace/trace_syscalls.c

@@ -274,7 +274,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
 	struct syscall_trace_enter trace;
 	struct syscall_metadata *meta = call->data;
 	int offset = offsetof(typeof(trace), args);
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	for (i = 0; i < meta->nb_args; i++) {
 		ret = trace_define_field(call, meta->types[i],
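The kernel/trace/trace_syscalls.c change splits "int ret, i;" into "int ret = 0;" and "int i;" so that syscall_enter_define_fields() returns a defined value even when the loop over meta->nb_args runs zero times (a syscall with no arguments) and ret is never assigned. A minimal standalone sketch of the hazard and the fix follows; define_fields() and define_one_field() are invented stand-ins, not the kernel functions.

/*
 * Standalone sketch (invented names, not kernel code) of the hazard fixed in
 * trace_syscalls.c: if the per-argument loop runs zero times, an
 * uninitialized 'ret' would be returned.  Initializing it to 0 makes the
 * zero-argument case report success deterministically.
 */
#include <stdio.h>

static int define_one_field(int i)
{
	(void)i;
	return 0;	/* pretend each field is registered successfully */
}

static int define_fields(int nb_args)
{
	int ret = 0;	/* the fix: defined even if the loop body never runs */
	int i;

	for (i = 0; i < nb_args; i++)
		ret = define_one_field(i);

	return ret;
}

int main(void)
{
	printf("0-arg syscall: %d\n", define_fields(0));	/* 0, not stack garbage */
	printf("3-arg syscall: %d\n", define_fields(3));	/* 0 */
	return 0;
}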