OpenHarmony / kernel_linux

Commit f92f6e6e
Authored Oct 16, 2010 by Ingo Molnar

Merge branch 'core' of
git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile
into perf/core

Parents: 66af86e2 cd254f29

Showing 18 changed files with 510 additions and 510 deletions.
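Taken together, the series merged here moves the ARM and SH OProfile drivers onto the shared perf-events backend added in drivers/oprofile/oprofile_perf.c. A minimal sketch of the per-architecture glue that remains after this merge, modelled on the arch/arm and arch/sh hunks below; example_backtrace stands in for the architecture's own unwinder and is not part of this diff:

	#include <linux/init.h>
	#include <linux/oprofile.h>
	#include <linux/perf_event.h>
	#include <asm/ptrace.h>

	/* hypothetical arch-specific unwinder, not part of this diff */
	extern void example_backtrace(struct pt_regs * const regs, unsigned int depth);

	int __init oprofile_arch_init(struct oprofile_operations *ops)
	{
		/* only the backtrace hook stays arch-specific ... */
		ops->backtrace = example_backtrace;

		/* ... create_files/setup/start/stop/shutdown come from the backend */
		return oprofile_perf_init(ops);
	}

	void __exit oprofile_arch_exit(void)
	{
		oprofile_perf_exit();
	}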
arch/arm/kernel/perf_event.c        +6    -0
arch/arm/oprofile/Makefile          +4    -0
arch/arm/oprofile/common.c          +6    -305
arch/sh/Kconfig                     +13   -0
arch/sh/kernel/perf_event.c         +18   -0
arch/sh/oprofile/Makefile           +4    -0
arch/sh/oprofile/common.c           +23   -92
arch/sh/oprofile/op_impl.h          +0    -33
arch/x86/oprofile/backtrace.c       +59   -11
arch/x86/oprofile/nmi_int.c         +1    -8
drivers/oprofile/oprof.c            +8    -24
drivers/oprofile/oprof.h            +1    -1
drivers/oprofile/oprofile_files.c   +5    -2
drivers/oprofile/oprofile_perf.c    +328  -0
drivers/oprofile/oprofilefs.c       +20   -34
include/linux/oprofile.h            +7    -0
include/linux/perf_event.h          +2    -0
kernel/perf_event.c                 +5    -0
arch/arm/kernel/perf_event.c

@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+	return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED		0xFFFF
 
 #define C(_x) \
arch/arm/oprofile/Makefile

@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y	:= $(DRIVER_OBJS) common.o
arch/arm/oprofile/common.c

@@ -25,139 +25,10 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
-	unsigned long count;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long unit_mask;
-	unsigned long kernel;
-	unsigned long user;
-	struct perf_event_attr attr;
-};
-
-static int op_arm_enabled;
-static DEFINE_MUTEX(op_arm_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
-			struct perf_sample_data *data, struct pt_regs *regs)
-{
-	int id;
-	u32 cpu = smp_processor_id();
-
-	for (id = 0; id < perf_num_counters; ++id)
-		if (perf_events[cpu][id] == event)
-			break;
-
-	if (id != perf_num_counters)
-		oprofile_add_sample(regs, id);
-	else
-		pr_warning("oprofile: ignoring spurious overflow "
-				"on cpu %u\n", cpu);
-}
-
-/*
- * Called by op_arm_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
+char *op_name_from_perf_id(void)
 {
-	int i;
-	u32 size = sizeof(struct perf_event_attr);
-	struct perf_event_attr *attr;
-
-	for (i = 0; i < perf_num_counters; ++i) {
-		attr = &counter_config[i].attr;
-		memset(attr, 0, size);
-		attr->type = PERF_TYPE_RAW;
-		attr->size = size;
-		attr->config = counter_config[i].event;
-		attr->sample_period = counter_config[i].count;
-		attr->pinned = 1;
-	}
-}
-
-static int op_create_counter(int cpu, int event)
-{
-	int ret = 0;
-	struct perf_event *pevent;
-
-	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
-		return ret;
-
-	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-						  cpu, NULL,
-						  op_overflow_handler);
-
-	if (IS_ERR(pevent)) {
-		ret = PTR_ERR(pevent);
-	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
-		perf_event_release_kernel(pevent);
-		pr_warning("oprofile: failed to enable event %d "
-				"on CPU %d\n", event, cpu);
-		ret = -EBUSY;
-	} else {
-		perf_events[cpu][event] = pevent;
-	}
-
-	return ret;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
-	struct perf_event *pevent = perf_events[cpu][event];
-
-	if (pevent) {
-		perf_event_release_kernel(pevent);
-		perf_events[cpu][event] = NULL;
-	}
-}
-
-/*
- * Called by op_arm_start to create active perf events based on the
- * perviously configured attributes.
- */
-static int op_perf_start(void)
-{
-	int cpu, event, ret = 0;
-
-	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
-			ret = op_create_counter(cpu, event);
-			if (ret)
-				goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-/*
- * Called by op_arm_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
-	int cpu, event;
-
-	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
-			op_destroy_counter(cpu, event);
-}
-
-static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
-{
+	enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
+
 	switch (id) {
 	case ARM_PERF_PMU_ID_XSCALE1:
 		return "arm/xscale1";

@@ -176,116 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
 	}
 }
 
-static int op_arm_create_files(struct super_block *sb, struct dentry *root)
-{
-	unsigned int i;
-
-	for (i = 0; i < perf_num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
-
-		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-	}
-
-	return 0;
-}
-
-static int op_arm_setup(void)
-{
-	spin_lock(&oprofilefs_lock);
-	op_perf_setup();
-	spin_unlock(&oprofilefs_lock);
-	return 0;
-}
-
-static int op_arm_start(void)
-{
-	int ret = -EBUSY;
-
-	mutex_lock(&op_arm_mutex);
-	if (!op_arm_enabled) {
-		ret = 0;
-		op_perf_start();
-		op_arm_enabled = 1;
-	}
-	mutex_unlock(&op_arm_mutex);
-	return ret;
-}
-
-static void op_arm_stop(void)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled)
-		op_perf_stop();
-	op_arm_enabled = 0;
-	mutex_unlock(&op_arm_mutex);
-}
-
-#ifdef CONFIG_PM
-static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled)
-		op_perf_stop();
-	mutex_unlock(&op_arm_mutex);
-	return 0;
-}
-
-static int op_arm_resume(struct platform_device *dev)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled && op_perf_start())
-		op_arm_enabled = 0;
-	mutex_unlock(&op_arm_mutex);
-	return 0;
-}
-
-static struct platform_driver oprofile_driver = {
-	.driver		= {
-		.name		= "arm-oprofile",
-	},
-	.resume		= op_arm_resume,
-	.suspend	= op_arm_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&oprofile_driver);
-	if (ret)
-		goto out;
-
-	oprofile_pdev =	platform_device_register_simple(
-				oprofile_driver.driver.name, 0, NULL, 0);
-	if (IS_ERR(oprofile_pdev)) {
-		ret = PTR_ERR(oprofile_pdev);
-		platform_driver_unregister(&oprofile_driver);
-	}
-
-out:
-	return ret;
-}
-
-static void exit_driverfs(void)
-{
-	platform_device_unregister(oprofile_pdev);
-	platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int report_trace(struct stackframe *frame, void *d)
 {
 	unsigned int *depth = d;

@@ -350,74 +111,14 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	int cpu, ret = 0;
-
-	perf_num_counters = armpmu_get_max_events();
-
-	counter_config = kcalloc(perf_num_counters,
-			sizeof(struct op_counter_config), GFP_KERNEL);
-
-	if (!counter_config) {
-		pr_info("oprofile: failed to allocate %d "
-				"counters\n", perf_num_counters);
-		return -ENOMEM;
-	}
-
-	ret = init_driverfs();
-	if (ret) {
-		kfree(counter_config);
-		counter_config = NULL;
-		return ret;
-	}
-
-	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
-				sizeof(struct perf_event *), GFP_KERNEL);
-		if (!perf_events[cpu]) {
-			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", perf_num_counters, cpu);
-			while (--cpu >= 0)
-				kfree(perf_events[cpu]);
-			return -ENOMEM;
-		}
-	}
-
 	ops->backtrace		= arm_backtrace;
-	ops->create_files	= op_arm_create_files;
-	ops->setup		= op_arm_setup;
-	ops->start		= op_arm_start;
-	ops->stop		= op_arm_stop;
-	ops->shutdown		= op_arm_stop;
-	ops->cpu_type		= op_name_from_perf_id(armpmu_get_pmu_id());
-
-	if (!ops->cpu_type)
-		ret = -ENODEV;
-	else
-		pr_info("oprofile: using %s\n", ops->cpu_type);
 
-	return ret;
+	return oprofile_perf_init(ops);
 }
 
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
 {
-	int cpu, id;
-	struct perf_event *event;
-
-	if (*perf_events) {
-		for_each_possible_cpu(cpu) {
-			for (id = 0; id < perf_num_counters; ++id) {
-				event = perf_events[cpu][id];
-				if (event != NULL)
-					perf_event_release_kernel(event);
-			}
-
-			kfree(perf_events[cpu]);
-		}
-	}
-
-	if (counter_config) {
-		kfree(counter_config);
-		exit_driverfs();
-	}
+	oprofile_perf_exit();
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)

@@ -425,5 +126,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	pr_info("oprofile: hardware counters not available\n");
 	return -ENODEV;
 }
-void oprofile_arch_exit(void) {}
+void __exit oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
arch/sh/Kconfig

@@ -249,6 +249,11 @@ config ARCH_SHMOBILE
 	select PM
 	select PM_RUNTIME
 
+config CPU_HAS_PMU
+	depends on CPU_SH4 || CPU_SH4A
+	default y
+	bool
+
 if SUPERH32
 
 choice

@@ -738,6 +743,14 @@ config GUSA_RB
 	  LLSC, this should be more efficient than the other alternative of
 	  disabling interrupts around the atomic sequence.
 
+config HW_PERF_EVENTS
+	bool "Enable hardware performance counter support for perf events"
+	depends on PERF_EVENTS && CPU_HAS_PMU
+	default y
+	help
+	  Enable hardware performance counter support for perf events. If
+	  disabled, perf events will use software events only.
+
 source "drivers/sh/Kconfig"
 
 endmenu
arch/sh/kernel/perf_event.c

@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+const char *perf_pmu_name(void)
+{
+	if (!sh_pmu)
+		return NULL;
+
+	return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
arch/sh/oprofile/Makefile

@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y	:= $(DRIVER_OBJS) common.o backtrace.o
arch/sh/oprofile/common.c

@@ -17,114 +17,45 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/perf_event.h>
 #include <asm/processor.h>
-#include "op_impl.h"
-
-static struct op_sh_model *model;
-
-static struct op_counter_config ctr[20];
 
+#ifdef CONFIG_HW_PERF_EVENTS
 extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-static int op_sh_setup(void)
-{
-	/* Pre-compute the values to stuff in the hardware registers.  */
-	model->reg_setup(ctr);
-
-	/* Configure the registers on all cpus.  */
-	on_each_cpu(model->cpu_setup, NULL, 1);
-	return 0;
-}
-
-static int op_sh_create_files(struct super_block *sb, struct dentry *root)
+char *op_name_from_perf_id(void)
 {
-	int i, ret = 0;
+	const char *pmu;
+	char buf[20];
+	int size;
 
-	for (i = 0; i < model->num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
+	pmu = perf_pmu_name();
+	if (!pmu)
+		return NULL;
 
-		snprintf(buf, sizeof(buf), "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+	size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
+	if (size > -1 && size < sizeof(buf))
+		return buf;
 
-		ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-		if (model->create_files)
-			ret |= model->create_files(sb, dir);
-		else
-			ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-
-		/* Dummy entries */
-		ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-	}
-
-	return ret;
+	return NULL;
 }
 
-static int op_sh_start(void)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	/* Enable performance monitoring for all counters.  */
-	on_each_cpu(model->cpu_start, NULL, 1);
+	ops->backtrace = sh_backtrace;
 
-	return 0;
+	return oprofile_perf_init(ops);
 }
 
-static void op_sh_stop(void)
+void __exit oprofile_arch_exit(void)
 {
-	/* Disable performance monitoring for all counters.  */
-	on_each_cpu(model->cpu_stop, NULL, 1);
+	oprofile_perf_exit();
 }
-
+#else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	struct op_sh_model *lmodel = NULL;
-	int ret;
-
-	/*
-	 * Always assign the backtrace op. If the counter initialization
-	 * fails, we fall back to the timer which will still make use of
-	 * this.
-	 */
-	ops->backtrace = sh_backtrace;
-
-	/*
-	 * XXX
-	 *
-	 * All of the SH7750/SH-4A counters have been converted to perf,
-	 * this infrastructure hook is left for other users until they've
-	 * had a chance to convert over, at which point all of this
-	 * will be deleted.
-	 */
-	if (!lmodel)
-		return -ENODEV;
-
-	if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
-		return -ENODEV;
-
-	ret = lmodel->init();
-	if (unlikely(ret != 0))
-		return ret;
-
-	model = lmodel;
-
-	ops->setup		= op_sh_setup;
-	ops->create_files	= op_sh_create_files;
-	ops->start		= op_sh_start;
-	ops->stop		= op_sh_stop;
-	ops->cpu_type		= lmodel->cpu_type;
-
-	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-	       lmodel->cpu_type);
-
-	return 0;
+	pr_info("oprofile: hardware counters not available\n");
+	return -ENODEV;
 }
 
-void oprofile_arch_exit(void)
-{
-	if (model && model->exit)
-		model->exit();
-}
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
arch/sh/oprofile/op_impl.h  (deleted file, was mode 100644)

#ifndef __OP_IMPL_H
#define __OP_IMPL_H

/* Per-counter configuration as set via oprofilefs.  */
struct op_counter_config {
	unsigned long enabled;
	unsigned long event;

	unsigned long count;

	/* Dummy values for userspace tool compliance */
	unsigned long kernel;
	unsigned long user;
	unsigned long unit_mask;
};

/* Per-architecture configury and hooks.  */
struct op_sh_model {
	void (*reg_setup)(struct op_counter_config *);
	int (*create_files)(struct super_block *sb, struct dentry *dir);
	void (*cpu_setup)(void *dummy);
	int (*init)(void);
	void (*exit)(void);
	void (*cpu_start)(void *args);
	void (*cpu_stop)(void *args);
	char *cpu_type;
	unsigned char num_counters;
};

/* arch/sh/oprofile/common.c */
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);

#endif /* __OP_IMPL_H */
arch/x86/oprofile/backtrace.c

@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 #include <asm/stacktrace.h>
+#include <linux/compat.h>
 
 static void backtrace_warning_symbol(void *data, char *msg,
 				     unsigned long symbol)

@@ -48,14 +49,12 @@ static struct stacktrace_ops backtrace_ops = {
 	.walk_stack	= print_context_stack,
 };
 
-struct frame_head {
-	struct frame_head *bp;
-	unsigned long ret;
-} __attribute__((packed));
-
-static struct frame_head *dump_user_backtrace(struct frame_head *head)
+#ifdef CONFIG_COMPAT
+static struct stack_frame_ia32 *
+dump_user_backtrace_32(struct stack_frame_ia32 *head)
 {
-	struct frame_head bufhead[2];
+	struct stack_frame_ia32 bufhead[2];
+	struct stack_frame_ia32 *fp;
 
 	/* Also check accessibility of one struct frame_head beyond */
 	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))

@@ -63,20 +62,66 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
 	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
 		return NULL;
 
-	oprofile_add_trace(bufhead[0].ret);
+	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+
+	oprofile_add_trace(bufhead[0].return_address);
+
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (head >= fp)
+		return NULL;
+
+	return fp;
+}
+
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+	struct stack_frame_ia32 *head;
+
+	/* User process is 32-bit */
+	if (!current || !test_thread_flag(TIF_IA32))
+		return 0;
+
+	head = (struct stack_frame_ia32 *) regs->bp;
+	while (depth-- && head)
+		head = dump_user_backtrace_32(head);
+
+	return 1;
+}
+
+#else
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+	return 0;
+}
+#endif /* CONFIG_COMPAT */
+
+static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+{
+	struct stack_frame bufhead[2];
+
+	/* Also check accessibility of one struct stack_frame beyond */
+	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+		return NULL;
+	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+		return NULL;
+
+	oprofile_add_trace(bufhead[0].return_address);
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (head >= bufhead[0].bp)
+	if (head >= bufhead[0].next_frame)
 		return NULL;
 
-	return bufhead[0].bp;
+	return bufhead[0].next_frame;
 }
 
 void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
 	if (!user_mode_vm(regs)) {
 		unsigned long stack = kernel_stack_pointer(regs);

@@ -86,6 +131,9 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
+	if (x86_backtrace_32(regs, depth))
+		return;
+
 	while (depth-- && head)
 		head = dump_user_backtrace(head);
 }
arch/x86/oprofile/nmi_int.c

@@ -695,9 +695,6 @@ static int __init ppro_init(char **cpu_type)
 	return 1;
 }
 
-/* in order to get sysfs right */
-static int using_nmi;
-
 int __init op_nmi_init(struct oprofile_operations *ops)
 {
 	__u8 vendor = boot_cpu_data.x86_vendor;

@@ -705,8 +702,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;
 
-	using_nmi = 0;
-
 	if (!cpu_has_apic)
 		return -ENODEV;

@@ -790,13 +785,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	if (ret)
 		return ret;
 
-	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
 }
 
 void op_nmi_exit(void)
 {
-	if (using_nmi)
-		exit_sysfs();
+	exit_sysfs();
 }
drivers/oprofile/oprof.c

@@ -225,26 +225,17 @@ void oprofile_shutdown(void)
 	mutex_unlock(&start_mutex);
 }
 
-int oprofile_set_backtrace(unsigned long val)
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
 {
-	int err = 0;
+	int err = -EBUSY;
 
 	mutex_lock(&start_mutex);
-
-	if (oprofile_started) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	if (!oprofile_ops.backtrace) {
-		err = -EINVAL;
-		goto out;
+	if (!oprofile_started) {
+		*addr = val;
+		err = 0;
 	}
-
-	oprofile_backtrace_depth = val;
-
-out:
 	mutex_unlock(&start_mutex);
 
 	return err;
 }

@@ -257,16 +248,9 @@ static int __init oprofile_init(void)
 		printk(KERN_INFO "oprofile: using timer interrupt.\n");
 		err = oprofile_timer_init(&oprofile_ops);
 		if (err)
-			goto out_arch;
+			return err;
 	}
-
-	err = oprofilefs_register();
-	if (err)
-		goto out_arch;
-	return 0;
-
-out_arch:
-	oprofile_arch_exit();
-	return err;
+	return oprofilefs_register();
 }
drivers/oprofile/oprof.h

@@ -37,7 +37,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
 int oprofile_timer_init(struct oprofile_operations *ops);
 void oprofile_timer_exit(void);
 
-int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
 int oprofile_set_timeout(unsigned long time);
 
 #endif /* OPROF_H */
drivers/oprofile/oprofile_files.c

@@ -79,14 +79,17 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 	if (*offset)
 		return -EINVAL;
 
+	if (!oprofile_ops.backtrace)
+		return -EINVAL;
+
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
 	if (retval)
 		return retval;
 
-	retval = oprofile_set_backtrace(val);
+	retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
 	if (retval)
 		return retval;
+
 	return count;
 }
drivers/oprofile/oprofile_perf.c  (new file, mode 100644)

/*
 * Copyright 2010 ARM Ltd.
 *
 * Perf-events backend for OProfile.
 */
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/oprofile.h>
#include <linux/slab.h>

/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;
	unsigned long enabled;
	unsigned long event;
	unsigned long unit_mask;
	unsigned long kernel;
	unsigned long user;
	struct perf_event_attr attr;
};

static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);

static struct op_counter_config *counter_config;
static struct perf_event **perf_events[nr_cpumask_bits];
static int num_counters;

/*
 * Overflow callback for oprofile.
 */
static void op_overflow_handler(struct perf_event *event, int unused,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	int id;
	u32 cpu = smp_processor_id();

	for (id = 0; id < num_counters; ++id)
		if (perf_events[cpu][id] == event)
			break;

	if (id != num_counters)
		oprofile_add_sample(regs, id);
	else
		pr_warning("oprofile: ignoring spurious overflow "
				"on cpu %u\n", cpu);
}

/*
 * Called by oprofile_perf_setup to create perf attributes to mirror the
 * oprofile settings in counter_config. Attributes are created as `pinned'
 * events and so are permanently scheduled on the PMU.
 */
static void op_perf_setup(void)
{
	int i;
	u32 size = sizeof(struct perf_event_attr);
	struct perf_event_attr *attr;

	for (i = 0; i < num_counters; ++i) {
		attr = &counter_config[i].attr;
		memset(attr, 0, size);
		attr->type = PERF_TYPE_RAW;
		attr->size = size;
		attr->config = counter_config[i].event;
		attr->sample_period = counter_config[i].count;
		attr->pinned = 1;
	}
}

static int op_create_counter(int cpu, int event)
{
	struct perf_event *pevent;

	if (!counter_config[event].enabled || perf_events[cpu][event])
		return 0;

	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
						  cpu, NULL,
						  op_overflow_handler);

	if (IS_ERR(pevent))
		return PTR_ERR(pevent);

	if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
		perf_event_release_kernel(pevent);
		pr_warning("oprofile: failed to enable event %d "
				"on CPU %d\n", event, cpu);
		return -EBUSY;
	}

	perf_events[cpu][event] = pevent;

	return 0;
}

static void op_destroy_counter(int cpu, int event)
{
	struct perf_event *pevent = perf_events[cpu][event];

	if (pevent) {
		perf_event_release_kernel(pevent);
		perf_events[cpu][event] = NULL;
	}
}

/*
 * Called by oprofile_perf_start to create active perf events based on the
 * perviously configured attributes.
 */
static int op_perf_start(void)
{
	int cpu, event, ret = 0;

	for_each_online_cpu(cpu) {
		for (event = 0; event < num_counters; ++event) {
			ret = op_create_counter(cpu, event);
			if (ret)
				return ret;
		}
	}

	return ret;
}

/*
 * Called by oprofile_perf_stop at the end of a profiling run.
 */
static void op_perf_stop(void)
{
	int cpu, event;

	for_each_online_cpu(cpu)
		for (event = 0; event < num_counters; ++event)
			op_destroy_counter(cpu, event);
}

static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < num_counters; i++) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

static int oprofile_perf_setup(void)
{
	spin_lock(&oprofilefs_lock);
	op_perf_setup();
	spin_unlock(&oprofilefs_lock);
	return 0;
}

static int oprofile_perf_start(void)
{
	int ret = -EBUSY;

	mutex_lock(&oprofile_perf_mutex);
	if (!oprofile_perf_enabled) {
		ret = 0;
		op_perf_start();
		oprofile_perf_enabled = 1;
	}
	mutex_unlock(&oprofile_perf_mutex);
	return ret;
}

static void oprofile_perf_stop(void)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
}

#ifdef CONFIG_PM
static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static int oprofile_perf_resume(struct platform_device *dev)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled && op_perf_start())
		oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

static struct platform_device *oprofile_pdev;

static int __init init_driverfs(void)
{
	int ret;

	ret = platform_driver_register(&oprofile_driver);
	if (ret)
		return ret;

	oprofile_pdev =	platform_device_register_simple(
				oprofile_driver.driver.name, 0, NULL, 0);
	if (IS_ERR(oprofile_pdev)) {
		ret = PTR_ERR(oprofile_pdev);
		platform_driver_unregister(&oprofile_driver);
	}

	return ret;
}

static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
#else
static inline int init_driverfs(void) { return 0; }
static inline void exit_driverfs(void) { }
#endif /* CONFIG_PM */

void oprofile_perf_exit(void)
{
	int cpu, id;
	struct perf_event *event;

	for_each_possible_cpu(cpu) {
		for (id = 0; id < num_counters; ++id) {
			event = perf_events[cpu][id];
			if (event)
				perf_event_release_kernel(event);
		}

		kfree(perf_events[cpu]);
	}

	kfree(counter_config);
	exit_driverfs();
}

int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	memset(&perf_events, 0, sizeof(perf_events));

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files	= oprofile_perf_create_files;
	ops->setup		= oprofile_perf_setup;
	ops->start		= oprofile_perf_start;
	ops->stop		= oprofile_perf_stop;
	ops->shutdown		= oprofile_perf_stop;
	ops->cpu_type		= op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	if (ret)
		oprofile_perf_exit();

	return ret;
}
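The generic backend above leans on two hooks that each architecture supplies, as the arch/arm and arch/sh hunks earlier in this commit do: perf_num_counters() reports the PMU width that sizes counter_config and perf_events, and op_name_from_perf_id() names the PMU for ops->cpu_type. A hedged sketch of those hooks; EXAMPLE_NR_COUNTERS and the "example/pmu" string are invented for illustration and are not part of the diff:

	#include <linux/perf_event.h>
	#include <linux/oprofile.h>

	#define EXAMPLE_NR_COUNTERS	4	/* hypothetical PMU width */

	int perf_num_counters(void)
	{
		/* sizes counter_config[] and perf_events[cpu][] in the backend */
		return EXAMPLE_NR_COUNTERS;
	}

	char *op_name_from_perf_id(void)
	{
		/* becomes ops->cpu_type; NULL makes oprofile_perf_init() return -ENODEV */
		return "example/pmu";
	}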
drivers/oprofile/oprofilefs.c

@@ -91,16 +91,20 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long *value = file->private_data;
+	unsigned long value;
 	int retval;
 
 	if (*offset)
 		return -EINVAL;
 
-	retval = oprofilefs_ulong_from_user(value, buf, count);
+	retval = oprofilefs_ulong_from_user(&value, buf, count);
+	if (retval)
+		return retval;
 
+	retval = oprofile_set_ulong(file->private_data, value);
 	if (retval)
 		return retval;
+
 	return count;
 }

@@ -126,50 +130,41 @@ static const struct file_operations ulong_ro_fops = {
 };
 
 
-static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+static int __oprofilefs_create_file(struct super_block *sb,
 	struct dentry *root, char const *name, const struct file_operations *fops,
-	int perm)
+	int perm, void *priv)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
-		return NULL;
+		return -ENOMEM;
 	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
 	if (!inode) {
 		dput(dentry);
-		return NULL;
+		return -ENOMEM;
 	}
 	inode->i_fop = fops;
 	d_add(dentry, inode);
-	return dentry;
+	dentry->d_inode->i_private = priv;
+	return 0;
 }
 
 
 int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_fops, 0644);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&ulong_fops, 0644, val);
 }
 
 
 int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&ulong_ro_fops, 0444, val);
 }

@@ -189,31 +184,22 @@ static const struct file_operations atomic_ro_fops = {
 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
 	char const *name, atomic_t *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &atomic_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&atomic_ro_fops, 0444, val);
 }
 
 
 int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
 	char const *name, const struct file_operations *fops)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
 }
 
 
 int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
 	char const *name, const struct file_operations *fops, int perm)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
 }
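The oprofilefs changes above plumb the backing pointer through __oprofilefs_create_file() into inode->i_private and funnel writes through oprofile_set_ulong(), which rejects updates with -EBUSY while profiling is running (see drivers/oprofile/oprof.c earlier in this commit). A small hedged sketch of a caller; example_threshold and example_create_files() are hypothetical and not part of the diff:

	#include <linux/fs.h>
	#include <linux/oprofile.h>

	static unsigned long example_threshold;	/* hypothetical tunable */

	static int example_create_files(struct super_block *sb, struct dentry *root)
	{
		/*
		 * Creates a 0644 oprofilefs file backed by &example_threshold via
		 * i_private; writes go through oprofile_set_ulong() and are refused
		 * with -EBUSY once profiling has started.
		 */
		return oprofilefs_create_ulong(sb, root, "example_threshold",
					       &example_threshold);
	}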
include/linux/oprofile.h

@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <asm/atomic.h>
 
 /* Each escaped entry is prefixed by ESCAPE_CODE

@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
+#ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* OPROFILE_H */
include/linux/perf_event.h

@@ -890,6 +890,8 @@ struct perf_output_handle {
 extern int perf_pmu_register(struct pmu *pmu);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task,
 				      struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
kernel/perf_event.c

@@ -63,6 +63,11 @@ static atomic64_t perf_event_id;
 void __weak perf_event_print_debug(void)	{ }
 
+extern __weak const char *perf_pmu_name(void)
+{
+	return "pmu";
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);