openanolis / cloud-kernel

Commit d652f4bb
Authored Mar 28, 2017 by Ingo Molnar
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parents: e3a6a624 a01851fa
Showing 3 changed files with 63 additions and 19 deletions (+63 -19)
arch/x86/events/core.c    +14  -2
kernel/events/core.c      +48 -16
tools/perf/util/symbol.c   +1  -1
未找到文件。
arch/x86/events/core.c  (+14 -2)

@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-        if (current->mm)
-                load_mm_cr4(current->mm);
+        if (current->active_mm)
+                load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                 return;
 
+        /*
+         * This function relies on not being called concurrently in two
+         * tasks in the same mm. Otherwise one task could observe
+         * perf_rdpmc_allowed > 1 and return all the way back to
+         * userspace with CR4.PCE clear while another task is still
+         * doing on_each_cpu_mask() to propagate CR4.PCE.
+         *
+         * For now, this can't happen because all callers hold mmap_sem
+         * for write. If this changes, we'll need a different solution.
+         */
+        lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
         if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
                 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
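For context on what these hunks gate: CR4.PCE is the bit that lets userspace execute RDPMC directly, and the mapped/unmapped callbacks toggle it when a perf event buffer is mmap()ed. A minimal self-monitoring sketch in userspace C (assumes x86_64 Linux with perf events permitted; error handling and the pc->lock seqlock retry loop that a production reader needs are omitted):

/* rdpmc_demo.c - sketch: read a hardware counter with RDPMC.
 * Build: gcc -O2 rdpmc_demo.c -o rdpmc_demo
 */
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;

        /* Count this task only, on any CPU. */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        /*
         * mmap()ing the event is what reaches x86_pmu_event_mapped() and
         * propagates CR4.PCE to the CPUs running this mm (see hunk above).
         */
        struct perf_event_mmap_page *pc = mmap(NULL, sysconf(_SC_PAGESIZE),
                                               PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED)
                return 1;

        if (pc->cap_user_rdpmc && pc->index) {
                /* RDPMC takes the hardware counter index, i.e. index - 1. */
                unsigned long long raw = __builtin_ia32_rdpmc(pc->index - 1);
                printf("raw counter value: %llu\n", raw);
        }
        return 0;
}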
kernel/events/core.c  (+48 -16)

@@ -4261,7 +4261,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
         raw_spin_lock_irq(&ctx->lock);
         /*
-         * Mark this even as STATE_DEAD, there is no external reference to it
+         * Mark this event as STATE_DEAD, there is no external reference to it
          * anymore.
          *
          * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10556,21 +10556,22 @@ void perf_event_free_task(struct task_struct *task)
                         continue;
 
                 mutex_lock(&ctx->mutex);
-again:
-                list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-                                group_entry)
-                        perf_free_event(event, ctx);
+                raw_spin_lock_irq(&ctx->lock);
+                /*
+                 * Destroy the task <-> ctx relation and mark the context dead.
+                 *
+                 * This is important because even though the task hasn't been
+                 * exposed yet the context has been (through child_list).
+                 */
+                RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+                WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+                put_task_struct(task); /* cannot be last */
+                raw_spin_unlock_irq(&ctx->lock);
 
-                list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                                group_entry)
+                list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
                         perf_free_event(event, ctx);
 
-                if (!list_empty(&ctx->pinned_groups) ||
-                                !list_empty(&ctx->flexible_groups))
-                        goto again;
-
                 mutex_unlock(&ctx->mutex);
-
                 put_ctx(ctx);
         }
 }
@@ -10608,7 +10609,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10702,6 +10708,16 @@ inherit_event(struct perf_event *parent_event,
         return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
               struct task_struct *parent,
               struct perf_event_context *parent_ctx,
@@ -10716,6 +10732,11 @@ static int inherit_group(struct perf_event *parent_event,
                                  child, NULL, child_ctx);
         if (IS_ERR(leader))
                 return PTR_ERR(leader);
+        /*
+         * @leader can be NULL here because of is_orphaned_event(). In this
+         * case inherit_event() will create individual events, similar to what
+         * perf_group_detach() would do anyway.
+         */
         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
                 child_ctr = inherit_event(sub, parent, parent_ctx,
                                           child, leader, child_ctx);
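inherit_group() walks a group leader and its sibling_list; such groups are built from userspace by passing the leader's fd as group_fd to perf_event_open(). A minimal sketch of that API (event choices arbitrary, error handling trimmed; not part of this commit):

/* group_demo.c - sketch: a leader plus one sibling, the structure that
 * inherit_group() replicates into a child task.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int open_event(__u64 config, int group_fd)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = config;
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
        long long cycles = 0, instrs = 0;

        int leader = open_event(PERF_COUNT_HW_CPU_CYCLES, -1);
        /* Passing the leader's fd makes the second event a group sibling. */
        int sibling = open_event(PERF_COUNT_HW_INSTRUCTIONS, leader);
        if (leader < 0 || sibling < 0)
                return 1;

        /* ... workload here; the group is scheduled onto the PMU as a unit ... */

        read(leader, &cycles, sizeof(cycles));
        read(sibling, &instrs, sizeof(instrs));
        printf("cycles=%lld instructions=%lld\n", cycles, instrs);
        return 0;
}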
@@ -10725,6 +10746,17 @@ static int inherit_group(struct perf_event *parent_event,
         return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
                    struct perf_event_context *parent_ctx,
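The inherit_*() paths documented above run at fork() for counters opened with attr.inherit set. A hedged userspace sketch of the observable behavior (software clock event chosen arbitrarily; not part of this commit): when an inherited child exits, its counts are folded back into the parent event, so the parent's read() covers both.

/* inherit_demo.c - sketch: an inherited counter also counts the child. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;
        attr.inherit = 1;       /* children get a copy via inherit_event() */

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        pid_t pid = fork();
        if (pid == 0) {
                /* Child: burn some CPU; its inherited copy counts this. */
                for (volatile unsigned long i = 0; i < 50000000; i++)
                        ;
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        /* Includes the exited child's task clock, folded into the parent. */
        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("task clock (ns), parent + exited children: %lld\n", count);
        return 0;
}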
@@ -10747,7 +10779,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                  * First allocate and initialize a context for the
                  * child.
                  */
-
                 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
                 if (!child_ctx)
                         return -ENOMEM;
@@ -10809,7 +10840,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                 ret = inherit_task_group(event, parent, parent_ctx,
                                          child, ctxn, &inherited_all);
                 if (ret)
-                        break;
+                        goto out_unlock;
         }
 
         /*
@@ -10825,7 +10856,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                 ret = inherit_task_group(event, parent, parent_ctx,
                                          child, ctxn, &inherited_all);
                 if (ret)
-                        break;
+                        goto out_unlock;
         }
 
         raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10853,6 +10884,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
         }
 
         raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
         mutex_unlock(&parent_ctx->mutex);
 
         perf_unpin_context(parent_ctx);
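The last three hunks convert break to goto out_unlock so a failed inherit_task_group() skips the second inheritance pass instead of falling through into it. A standalone sketch of that single-exit error-handling pattern (illustrative names only, not kernel code):

/* goto_unwind_demo.c - sketch of the pattern the hunks above switch to. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int i)
{
        return (i == 3) ? -1 : 0;       /* pretend step 3 fails */
}

static int init_all(void)
{
        int i, ret = 0;

        pthread_mutex_lock(&lock);
        for (i = 0; i < 5; i++) {
                ret = do_step(i);
                if (ret)
                        goto out_unlock;        /* skip phase two entirely */
        }
        for (i = 0; i < 5; i++) /* phase two runs only if phase one passed */
                printf("finalize %d\n", i);
out_unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("init_all() = %d\n", init_all());
        return 0;
}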
tools/perf/util/symbol.c  (+1 -1)

@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
         /* Last entry */
         if (curr->end == curr->start)
-                curr->end = roundup(curr->start, 4096);
+                curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
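The one-line fix handles a corner case: roundup(x, 4096) returns x unchanged when x is already 4096-aligned, so a page-aligned last symbol ended up with end == start, an empty range. A tiny arithmetic sketch (roundup defined locally with the same behavior as the kernel macro for these inputs):

/* roundup_demo.c - why the last symbol needs "+ 4096". */
#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned long mid_page = 0x1234;        /* not page-aligned */
        unsigned long aligned  = 0x2000;        /* page-aligned */

        printf("roundup(0x%lx, 4096)        = 0x%lx\n",
               mid_page, roundup(mid_page, 4096UL));
        printf("roundup(0x%lx, 4096)        = 0x%lx  (end == start: empty symbol)\n",
               aligned, roundup(aligned, 4096UL));
        printf("roundup(0x%lx, 4096) + 4096 = 0x%lx  (always past start)\n",
               aligned, roundup(aligned, 4096UL) + 4096);
        return 0;
}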