openanolis / cloud-kernel

Commit 03b30d15
Merge branch 'tracing/ftrace' into tracing/core

Author: Ingo Molnar
Authored on: Jan 22, 2009
Parents: b43f7093, 3690b5e6

Showing 7 changed files with 280 additions and 82 deletions (+280 -82)
Documentation/ftrace.txt            +74   -0
arch/x86/kernel/ds.c                +17  -14
arch/x86/kernel/dumpstack.c          +6   -0
include/linux/ftrace.h              +13   -0
kernel/trace/trace.h                 +0   -1
kernel/trace/trace_hw_branches.c   +141  -32
kernel/trace/trace_workqueue.c      +29  -35
Documentation/ftrace.txt
...
...
@@ -165,6 +165,8 @@ Here is the list of current tracers that may be configured.
nop - This is not a tracer. To remove all tracers from tracing
simply echo "nop" into current_tracer.
hw-branch-tracer - traces branches on all cpu's in a circular buffer.
Examples of using the tracer
----------------------------
...
...
@@ -1152,6 +1154,78 @@ int main (int argc, char **argv)
return 0;
}
hw-branch-tracer (x86 only)
---------------------------
This tracer uses the x86 last branch tracing hardware feature to
collect a branch trace on all cpus with relatively low overhead.
The tracer uses a fixed-size circular buffer per cpu and only
traces ring 0 branches. The trace file dumps that buffer in the
following format:
# tracer: hw-branch-tracer
#
# CPU# TO <- FROM
0 scheduler_tick+0xb5/0x1bf <- task_tick_idle+0x5/0x6
2 run_posix_cpu_timers+0x2b/0x72a <- run_posix_cpu_timers+0x25/0x72a
0 scheduler_tick+0x139/0x1bf <- scheduler_tick+0xed/0x1bf
0 scheduler_tick+0x17c/0x1bf <- scheduler_tick+0x148/0x1bf
2 run_posix_cpu_timers+0x9e/0x72a <- run_posix_cpu_timers+0x5e/0x72a
0 scheduler_tick+0x1b6/0x1bf <- scheduler_tick+0x1aa/0x1bf
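The commands below show one way to capture such a trace interactively; this is a
minimal sketch, assuming debugfs is mounted at /sys/kernel/debug (adjust the
tracing directory to your mount point):

  # echo hw-branch-tracer > /sys/kernel/debug/tracing/current_tracer
  # cat /sys/kernel/debug/tracing/trace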
The tracer may be used to dump the trace for the oops'ing cpu on a
kernel oops into the system log. To enable this, ftrace_dump_on_oops
must be set. To set ftrace_dump_on_oops, one can either use the sysctl
function or set it via the proc system interface.
sysctl kernel.ftrace_dump_on_oops=1
or
echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
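Either way, the current setting can be checked by reading the same proc file:

  cat /proc/sys/kernel/ftrace_dump_on_oops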
Here's an example of such a dump after a null pointer dereference in a
kernel module:
[57848.105921] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
[57848.106019] IP: [<ffffffffa0000006>] open+0x6/0x14 [oops]
[57848.106019] PGD 2354e9067 PUD 2375e7067 PMD 0
[57848.106019] Oops: 0002 [#1] SMP
[57848.106019] last sysfs file: /sys/devices/pci0000:00/0000:00:1e.0/0000:20:05.0/local_cpus
[57848.106019] Dumping ftrace buffer:
[57848.106019] ---------------------------------
[...]
[57848.106019] 0 chrdev_open+0xe6/0x165 <- cdev_put+0x23/0x24
[57848.106019] 0 chrdev_open+0x117/0x165 <- chrdev_open+0xfa/0x165
[57848.106019] 0 chrdev_open+0x120/0x165 <- chrdev_open+0x11c/0x165
[57848.106019] 0 chrdev_open+0x134/0x165 <- chrdev_open+0x12b/0x165
[57848.106019] 0 open+0x0/0x14 [oops] <- chrdev_open+0x144/0x165
[57848.106019] 0 page_fault+0x0/0x30 <- open+0x6/0x14 [oops]
[57848.106019] 0 error_entry+0x0/0x5b <- page_fault+0x4/0x30
[57848.106019] 0 error_kernelspace+0x0/0x31 <- error_entry+0x59/0x5b
[57848.106019] 0 error_sti+0x0/0x1 <- error_kernelspace+0x2d/0x31
[57848.106019] 0 page_fault+0x9/0x30 <- error_sti+0x0/0x1
[57848.106019] 0 do_page_fault+0x0/0x881 <- page_fault+0x1a/0x30
[...]
[57848.106019] 0 do_page_fault+0x66b/0x881 <- is_prefetch+0x1ee/0x1f2
[57848.106019] 0 do_page_fault+0x6e0/0x881 <- do_page_fault+0x67a/0x881
[57848.106019] 0 oops_begin+0x0/0x96 <- do_page_fault+0x6e0/0x881
[57848.106019] 0 trace_hw_branch_oops+0x0/0x2d <- oops_begin+0x9/0x96
[...]
[57848.106019] 0 ds_suspend_bts+0x2a/0xe3 <- ds_suspend_bts+0x1a/0xe3
[57848.106019] ---------------------------------
[57848.106019] CPU 0
[57848.106019] Modules linked in: oops
[57848.106019] Pid: 5542, comm: cat Tainted: G W 2.6.28 #23
[57848.106019] RIP: 0010:[<ffffffffa0000006>] [<ffffffffa0000006>] open+0x6/0x14 [oops]
[57848.106019] RSP: 0018:ffff880235457d48 EFLAGS: 00010246
[...]
dynamic ftrace
--------------
...
...
arch/x86/kernel/ds.c
...
...
@@ -15,8 +15,8 @@
  * - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
...
...
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }

 static const struct ds_configuration ds_cfg_netburst = {
-	.name = "netburst",
+	.name = "Netburst",
 	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
 	.ctl[dsf_bts_kernel]	= (1 << 5),
 	.ctl[dsf_bts_user]	= (1 << 6),
...
...
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-	.name = "pentium m",
+	.name = "Pentium M",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),

 	.sizeof_field		= sizeof(long),
...
...
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
 	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-	.name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+	.name = "Core 2/Atom",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 	.ctl[dsf_bts_kernel]	= (1 << 9),
 	.ctl[dsf_bts_user]	= (1 << 10),
...
...
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 	switch (c->x86) {
 	case 0x6:
 		switch (c->x86_model) {
-		case 0 ... 0xC:
-			/* sorry, don't know about them */
-			break;
-		case 0xD:
-		case 0xE: /* Pentium M */
+		case 0x9:
+		case 0xd: /* Pentium M */
 			ds_configure(&ds_cfg_pentium_m);
 			break;
-		default: /* Core2, Atom, ... */
-			ds_configure(&ds_cfg_core2);
+		case 0xf:
+		case 0x17: /* Core2 */
+		case 0x1c: /* Atom */
+			ds_configure(&ds_cfg_core2_atom);
 			break;
+		case 0x1a: /* i7 */
+		default:
+			/* sorry, don't know about them */
+			break;
 		}
 		break;
-	case 0xF:
+	case 0xf:
 		switch (c->x86_model) {
 		case 0x0:
 		case 0x1:
...
...
arch/x86/kernel/dumpstack.c
...
...
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/ftrace.h>
 #include <asm/stacktrace.h>
...
...
@@ -195,6 +196,11 @@ unsigned __kprobes long oops_begin(void)
 	int cpu;
 	unsigned long flags;

+	/* notify the hw-branch tracer so it may disable tracing and
+	   add the last trace to the trace buffer -
+	   the earlier this happens, the more useful the trace. */
+	trace_hw_branch_oops();
+
 	oops_enter();

 	/* racy, but better than risking deadlock. */
...
...
include/linux/ftrace.h
...
...
@@ -496,4 +496,17 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)

 #endif /* CONFIG_TRACING */

+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
 #endif /* _LINUX_FTRACE_H */
kernel/trace/trace.h
...
...
@@ -438,7 +438,6 @@ void trace_function(struct trace_array *tr,

 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);

 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
...
...
kernel/trace/trace_hw_branches.c
 /*
  * h/w branch tracer for x86 based on bts
  *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
+ * Copyright (C) 2008-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  *
  */
...
...
@@ -10,6 +11,9 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>

 #include <asm/ds.h>
...
...
@@ -19,13 +23,32 @@
 #define SIZEOF_BTS (1 << 13)

+/* The tracer mutex protects the below per-cpu tracer array.
+   It needs to be held to:
+	- start tracing on all cpus
+	- stop tracing on all cpus
+	- start tracing on a single hotplug cpu
+	- stop tracing on a single hotplug cpu
+	- read the trace from all cpus
+	- read the trace from a single cpu
+*/
+static DEFINE_MUTEX(bts_tracer_mutex);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);

 #define this_tracer per_cpu(tracer, smp_processor_id())
 #define this_buffer per_cpu(buffer, smp_processor_id())

 static int __read_mostly trace_hw_branches_enabled;
+static struct trace_array *hw_branch_trace __read_mostly;

+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_start_cpu(void *arg)
 {
 	if (this_tracer)
...
...
@@ -43,14 +66,20 @@ static void bts_trace_start_cpu(void *arg)
 static void bts_trace_start(struct trace_array *tr)
 {
-	int cpu;
+	mutex_lock(&bts_tracer_mutex);

-	tracing_reset_online_cpus(tr);
+	on_each_cpu(bts_trace_start_cpu, NULL, 1);
 	trace_hw_branches_enabled = 1;

-	for_each_cpu(cpu, cpu_possible_mask)
-		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+	mutex_unlock(&bts_tracer_mutex);
 }

+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_stop_cpu(void *arg)
 {
 	if (this_tracer) {
...
...
@@ -61,26 +90,63 @@ static void bts_trace_stop_cpu(void *arg)
 static void bts_trace_stop(struct trace_array *tr)
 {
-	int cpu;
+	mutex_lock(&bts_tracer_mutex);

 	trace_hw_branches_enabled = 0;
-	for_each_cpu(cpu, cpu_possible_mask)
-		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+	on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+
+	mutex_unlock(&bts_tracer_mutex);
 }

+static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	mutex_lock(&bts_tracer_mutex);
+
+	if (!trace_hw_branches_enabled)
+		goto out;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+		break;
+	}
+
+ out:
+	mutex_unlock(&bts_tracer_mutex);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
+	.notifier_call = bts_hotcpu_handler
+};
+
 static int bts_trace_init(struct trace_array *tr)
 {
+	hw_branch_trace = tr;
+
+	register_hotcpu_notifier(&bts_hotcpu_notifier);
 	tracing_reset_online_cpus(tr);
 	bts_trace_start(tr);

 	return 0;
 }

+static void bts_trace_reset(struct trace_array *tr)
+{
+	bts_trace_stop(tr);
+	unregister_hotcpu_notifier(&bts_hotcpu_notifier);
+}
+
 static void bts_trace_print_header(struct seq_file *m)
 {
-	seq_puts(m, "# CPU# FROM TO FUNCTION\n");
-	seq_puts(m, "# | | | |\n");
+	seq_puts(m, "# CPU# TO <- FROM\n");
 }

 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
...
...
@@ -88,15 +154,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *seq = &iter->seq;
 	struct hw_branch_entry *it;
+	unsigned long symflags = TRACE_ITER_SYM_OFFSET;

 	trace_assign_type(it, entry);

 	if (entry->type == TRACE_HW_BRANCHES) {
 		if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
-		    trace_seq_printf(seq, "0x%016llx -> 0x%016llx ", it->from, it->to) &&
-		    (!it->from || seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+		    seq_print_ip_sym(seq, it->to, symflags) &&
+		    trace_seq_printf(seq, "\t <- ") &&
+		    seq_print_ip_sym(seq, it->from, symflags) &&
 		    trace_seq_printf(seq, "\n"))
 			return TRACE_TYPE_HANDLED;
 		return TRACE_TYPE_PARTIAL_LINE;;
...
...
@@ -104,26 +170,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	return TRACE_TYPE_UNHANDLED;
 }

-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+void trace_hw_branch(u64 from, u64 to)
 {
+	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
 	struct hw_branch_entry *entry;
-	unsigned long irq;
+	unsigned long irq1, irq2;
+	int cpu;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
-	if (!event)
+	if (unlikely(!tr))
 		return;

+	if (unlikely(!trace_hw_branches_enabled))
+		return;
+
+	local_irq_save(irq1);
+	cpu = raw_smp_processor_id();
+	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+		goto out;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+	if (!event)
+		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, from);
 	entry->ent.type = TRACE_HW_BRANCHES;
-	entry->ent.cpu = smp_processor_id();
+	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event, irq);
+	ring_buffer_unlock_commit(tr->buffer, event, irq2);
+
+ out:
+	atomic_dec(&tr->data[cpu]->disabled);
+	local_irq_restore(irq1);
 }

-static void trace_bts_at(struct trace_array *tr, const struct bts_trace *trace, void *at)
+static void trace_bts_at(const struct bts_trace *trace, void *at)
 {
 	struct bts_struct bts;
 	int err = 0;
...
...
@@ -138,18 +220,29 @@ static void trace_bts_at(struct trace_array *tr,
 	switch (bts.qualifier) {
 	case BTS_BRANCH:
-		trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
 		break;
 	}
 }

+/*
+ * Collect the trace on the current cpu and write it into the ftrace buffer.
+ *
+ * pre: bts_tracer_mutex must be locked
+ */
 static void trace_bts_cpu(void *arg)
 {
 	struct trace_array *tr = (struct trace_array *)arg;
 	const struct bts_trace *trace;
 	unsigned char *at;

-	if (!this_tracer)
+	if (unlikely(!tr))
 		return;

+	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
+		return;
+
+	if (unlikely(!this_tracer))
+		return;
+
 	ds_suspend_bts(this_tracer);
...
...
@@ -159,11 +252,11 @@ static void trace_bts_cpu(void *arg)
 	for (at = trace->ds.top; (void *)at < trace->ds.end; at += trace->ds.size)
-		trace_bts_at(tr, trace, at);
+		trace_bts_at(trace, at);

 	for (at = trace->ds.begin; (void *)at < trace->ds.top; at += trace->ds.size)
-		trace_bts_at(tr, trace, at);
+		trace_bts_at(trace, at);

 out:
 	ds_resume_bts(this_tracer);
...
...
@@ -171,22 +264,38 @@ static void trace_bts_cpu(void *arg)
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-	int cpu;
+	mutex_lock(&bts_tracer_mutex);

-	for_each_cpu(cpu, cpu_possible_mask)
-		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+	on_each_cpu(trace_bts_cpu, iter->tr, 1);
+
+	mutex_unlock(&bts_tracer_mutex);
+}
+
+static void trace_bts_close(struct trace_iterator *iter)
+{
+	tracing_reset_online_cpus(iter->tr);
+}
+
+void trace_hw_branch_oops(void)
+{
+	mutex_lock(&bts_tracer_mutex);
+
+	trace_bts_cpu(hw_branch_trace);
+
+	mutex_unlock(&bts_tracer_mutex);
 }
 struct tracer bts_tracer __read_mostly =
 {
 	.name		= "hw-branch-tracer",
 	.init		= bts_trace_init,
-	.reset		= bts_trace_stop,
+	.reset		= bts_trace_reset,
 	.print_header	= bts_trace_print_header,
 	.print_line	= bts_trace_print_line,
 	.start		= bts_trace_start,
 	.stop		= bts_trace_stop,
-	.open		= trace_bts_prepare
+	.open		= trace_bts_prepare,
+	.close		= trace_bts_close
 };

 __init static int init_bts_trace(void)
...
...
kernel/trace/trace_workqueue.c
...
...
@@ -8,6 +8,7 @@
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
...
...
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))

 /* Insertion of a work */
 static void
...
...
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 							list) {
 		if (node->pid == wq_thread->pid) {
 			atomic_inc(&node->inserted);
...
...
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }

 /* Execution of a work */
...
...
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 							list) {
 		if (node->pid == wq_thread->pid) {
 			node->executed++;
...
...
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }

 /* Creation of a cpu workqueue thread */
...
...
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 	cws->pid = wq_thread->pid;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_empty(&all_workqueue_stat[cpu].list))
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_empty(&workqueue_cpu_stat(cpu)->list))
 		cws->first_entry = true;
-	list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }

 /* Destruction of a cpu workqueue thread */
...
...
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 							list) {
 		if (node->pid == wq_thread->pid) {
 			list_del(&node->list);
...
...
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
...
...
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
 	struct cpu_workqueue_stats *ret = NULL;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

-	if (!list_empty(&all_workqueue_stat[cpu].list))
-		ret = list_entry(all_workqueue_stat[cpu].list.next,
+	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
 				 struct cpu_workqueue_stats, list);

-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

 	return ret;
 }
...
...
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
 	unsigned long flags;
 	void *ret = NULL;

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-		spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
 			ret = workqueue_stat_start_cpu(cpu);
 			if (ret)
...
...
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
 		}
 		return NULL;
 	}
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

 	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, list);
...
...
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 		   cws->executed,
 		   trace_find_cmdline(cws->pid));

-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (&cws->list == all_workqueue_stat[cpu].list.next)
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
 		seq_printf(s, "\n");
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

 	return 0;
 }
...
...
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
 	if (ret)
 		goto no_creation;

-	all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats) * num_possible_cpus(), GFP_KERNEL);
-	if (!all_workqueue_stat) {
-		pr_warning("trace_workqueue: not enough memory\n");
-		goto no_creation;
-	}
-
 	for_each_possible_cpu(cpu) {
-		spin_lock_init(&all_workqueue_stat[cpu].lock);
-		INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
 	}

 	return 0;
...
...