openeuler / raspberrypi-kernel
Commit 42a0789b
Authored Dec 04, 2015 by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parents: ac675d0d 4e93ad60

Showing 3 changed files with 41 additions and 14 deletions (+41, -14):
arch/x86/kernel/cpu/perf_event.h        +1  -1
arch/x86/kernel/cpu/perf_event_intel.c  +1  -1
kernel/events/core.c                    +39 -12

arch/x86/kernel/cpu/perf_event.h
@@ -391,7 +391,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
         __EVENT_CONSTRAINT(code, n, \
-                          INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
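
This hunk repairs a copy-paste bug: the constraint mask OR'ed INTEL_ARCH_EVENT_MASK with itself, which is a no-op, so the macro never actually compared the X86_ALL_EVENT_FLAGS bits despite its comment promising to "check flags and event code/umask". A minimal standalone sketch, using made-up mask values rather than the kernel's real bit layout, shows why the buggy expression changed nothing:

/* sketch.c -- illustrative values only; the real masks live in the kernel's
 * arch/x86/kernel/cpu/perf_event.h and have a different layout. */
#include <stdio.h>
#include <stdint.h>

#define INTEL_ARCH_EVENT_MASK 0x0000ffffULL  /* assumed: event code + umask bits */
#define X86_ALL_EVENT_FLAGS   0x00ff0000ULL  /* assumed: software flag bits */

int main(void)
{
        /* The bug: x | x == x, so the flag bits never entered the mask. */
        uint64_t buggy = INTEL_ARCH_EVENT_MASK | INTEL_ARCH_EVENT_MASK;
        /* The fix widens the mask so the flags are compared as well. */
        uint64_t fixed = INTEL_ARCH_EVENT_MASK | X86_ALL_EVENT_FLAGS;

        printf("buggy cmask = %#llx\n", (unsigned long long)buggy); /* 0xffff */
        printf("fixed cmask = %#llx\n", (unsigned long long)fixed); /* 0xffffff */
        return 0;
}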

arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+        INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
         INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
         /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
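
The Haswell fix narrows the L1D_PEND_MISS constraint. INTEL_EVENT_CONSTRAINT matches on the event code alone, so the old entry pinned every 0x48 umask to the counter in mask 0x4; INTEL_UEVENT_CONSTRAINT matches code plus umask, and 0x148 encodes event 0x48 with umask 0x01, i.e. only L1D_PEND_MISS.PENDING. A decoding sketch, assuming the (umask << 8 | event) packing of these constraint codes; the helper is illustrative, not a kernel API:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t code  = 0x148;               /* the value from the hunk above */
        uint8_t  event = code & 0xff;         /* 0x48 -> L1D_PEND_MISS */
        uint8_t  umask = (code >> 8) & 0xff;  /* 0x01 -> the PENDING sub-event */

        printf("event=%#x umask=%#x cntmask=0x4 (generic counter 2 only)\n",
               event, umask);
        return 0;
}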

kernel/events/core.c
@@ -4225,7 +4225,14 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                 goto retry;
         }
 
-        __perf_event_period(&pe);
+        if (event->attr.freq) {
+                event->attr.sample_freq = value;
+        } else {
+                event->attr.sample_period = value;
+                event->hw.sample_period = value;
+        }
+
+        local64_set(&event->hw.period_left, 0);
         raw_spin_unlock_irq(&ctx->lock);
 
         return 0;
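
This hunk replaces the remote __perf_event_period() call with direct updates performed while ctx->lock is already held; zeroing hw.period_left via local64_set() makes the new period take effect at the next sample. Userspace reaches this path through the PERF_EVENT_IOC_PERIOD ioctl. A minimal sketch of that call, with error handling trimmed; perf_event_open() has no libc wrapper, hence syscall(2):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;

        /* Count cycles on the calling thread, any CPU. */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        /* Lands in the kernel's perf_event_period() patched above. */
        unsigned long long period = 200000;
        ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);

        close(fd);
        return 0;
}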
@@ -5675,6 +5682,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
         }
 }
 
+static void
+perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
+                        struct perf_event_context *task_ctx)
+{
+        rcu_read_lock();
+        preempt_disable();
+        perf_event_aux_ctx(task_ctx, output, data);
+        preempt_enable();
+        rcu_read_unlock();
+}
+
 static void
 perf_event_aux(perf_event_aux_output_cb output, void *data,
                struct perf_event_context *task_ctx)
@@ -5684,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
         struct pmu *pmu;
         int ctxn;
 
+        /*
+         * If we have task_ctx != NULL we only notify
+         * the task context itself. The task_ctx is set
+         * only for EXIT events before releasing task
+         * context.
+         */
+        if (task_ctx) {
+                perf_event_aux_task_ctx(output, data, task_ctx);
+                return;
+        }
+
         rcu_read_lock();
         list_for_each_entry_rcu(pmu, &pmus, entry) {
                 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                 if (cpuctx->unique_pmu != pmu)
                         goto next;
                 perf_event_aux_ctx(&cpuctx->ctx, output, data);
-                if (task_ctx)
-                        goto next;
                 ctxn = pmu->task_ctx_nr;
                 if (ctxn < 0)
                         goto next;
@@ -5701,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
 next:
                 put_cpu_ptr(pmu->pmu_cpu_context);
         }
-
-        if (task_ctx) {
-                preempt_disable();
-                perf_event_aux_ctx(task_ctx, output, data);
-                preempt_enable();
-        }
         rcu_read_unlock();
 }
 
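
Taken together, these three hunks split EXIT delivery into two disjoint paths: a non-NULL task_ctx is now served exclusively by the new perf_event_aux_task_ctx() helper and returns early, while the pmus walk, which always delivered to each cpuctx->ctx regardless of task_ctx, is reserved for the task_ctx == NULL case. Previously every per-ctxn perf_event_task() call walked the cpu contexts as well, which is why a system-wide session could record the same PERF_RECORD_EXIT twice; a distilled model of the before/after dispatch follows the final hunk below.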
@@ -8796,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         struct perf_event_context *child_ctx, *clone_ctx = NULL;
         unsigned long flags;
 
-        if (likely(!child->perf_event_ctxp[ctxn])) {
-                perf_event_task(child, NULL, 0);
+        if (likely(!child->perf_event_ctxp[ctxn]))
                 return;
-        }
 
         local_irq_save(flags);
         /*
@@ -8883,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
         for_each_task_context_nr(ctxn)
                 perf_event_exit_task_context(child, ctxn);
 
+        /*
+         * The perf_event_exit_task_context calls perf_event_task
+         * with child's task_ctx, which generates EXIT events for
+         * child contexts and sets child->perf_event_ctxp[] to NULL.
+         * At this point we need to send EXIT events to cpu contexts.
+         */
+        perf_event_task(child, NULL, 0);
 }
 
 static void perf_free_event(struct perf_event *event,