Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openeuler
Kernel
提交
8942c2b7
K
Kernel
项目概览
openeuler
/
Kernel
1 年多 前同步成功
通知
8
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
K
Kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
8942c2b7
编写于
8月 18, 2016
作者:
I
Ingo Molnar
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'perf/urgent' into perf/core, to pick up dependencies
Signed-off-by:
N
Ingo Molnar
<
mingo@kernel.org
>
上级
bdfaa2ee
71e7bc2b
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
68 additions
and
27 deletions
+68
-27
kernel/events/core.c
kernel/events/core.c
+68
-27
未找到文件。
kernel/events/core.c
浏览文件 @
8942c2b7
...
...
@@ -242,18 +242,6 @@ static int event_function(void *info)
return
ret
;
}
/*
 * Invoke @func for @event directly on the local CPU, bypassing the
 * cross-call machinery of event_function_call(). A failure here is a
 * programming error, hence the WARN rather than error propagation.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct event_function_struct efs = {
		.event	= event,
		.func	= func,
		.data	= data,
	};

	WARN_ON_ONCE(event_function(&efs));
}
static
void
event_function_call
(
struct
perf_event
*
event
,
event_f
func
,
void
*
data
)
{
struct
perf_event_context
*
ctx
=
event
->
ctx
;
...
...
@@ -303,6 +291,54 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
raw_spin_unlock_irq
(
&
ctx
->
lock
);
}
/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	/* Lockless snapshot of the owning task; re-validated under the lock below. */
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	/* Caller contract: IRQs must already be off. */
	WARN_ON_ONCE(!irqs_disabled());

	if (task) {
		/*
		 * NOTE(review): TASK_TOMBSTONE appears to mark a context whose
		 * owner is going away — nothing to do in that case; confirm
		 * against its definition elsewhere in this file.
		 */
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	/* Re-read ctx->task now that the lock is held; it may have changed. */
	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;
			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		/* No owning task: this must be the per-CPU context itself. */
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
...
...
@@ -3524,9 +3560,10 @@ static int perf_event_read(struct perf_event *event, bool group)
.
group
=
group
,
.
ret
=
0
,
};
smp_call_function_single
(
event
->
oncpu
,
__perf_event_read
,
&
data
,
1
);
ret
=
data
.
ret
;
ret
=
smp_call_function_single
(
event
->
oncpu
,
__perf_event_read
,
&
data
,
1
);
/* The event must have been read from an online CPU: */
WARN_ON_ONCE
(
ret
);
ret
=
ret
?
:
data
.
ret
;
}
else
if
(
event
->
state
==
PERF_EVENT_STATE_INACTIVE
)
{
struct
perf_event_context
*
ctx
=
event
->
ctx
;
unsigned
long
flags
;
...
...
@@ -6594,15 +6631,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
kfree
(
buf
);
}
/*
* Whether this @filter depends on a dynamic object which is not loaded
* yet or its load addresses are not known.
*/
static
bool
perf_addr_filter_needs_mmap
(
struct
perf_addr_filter
*
filter
)
{
return
filter
->
filter
&&
filter
->
inode
;
}
/*
* Check whether inode and address range match filter criteria.
*/
...
...
@@ -6664,6 +6692,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
struct
perf_event_context
*
ctx
;
int
ctxn
;
/*
* Data tracing isn't supported yet and as such there is no need
* to keep track of anything that isn't related to executable code:
*/
if
(
!
(
vma
->
vm_flags
&
VM_EXEC
))
return
;
rcu_read_lock
();
for_each_task_context_nr
(
ctxn
)
{
ctx
=
rcu_dereference
(
current
->
perf_event_ctxp
[
ctxn
]);
...
...
@@ -7816,7 +7851,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
list_for_each_entry
(
filter
,
&
ifh
->
list
,
entry
)
{
event
->
addr_filters_offs
[
count
]
=
0
;
if
(
perf_addr_filter_needs_mmap
(
filter
))
/*
* Adjust base offset if the filter is associated to a binary
* that needs to be mapped:
*/
if
(
filter
->
inode
)
event
->
addr_filters_offs
[
count
]
=
perf_addr_filter_apply
(
filter
,
mm
);
...
...
@@ -7947,8 +7986,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
goto
fail
;
}
if
(
token
==
IF_SRC_FILE
)
{
filename
=
match_strdup
(
&
args
[
2
]);
if
(
token
==
IF_SRC_FILE
||
token
==
IF_SRC_FILEADDR
)
{
int
fpos
=
filter
->
range
?
2
:
1
;
filename
=
match_strdup
(
&
args
[
fpos
]);
if
(
!
filename
)
{
ret
=
-
ENOMEM
;
goto
fail
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录