openanolis / cloud-kernel

Commit 25e03a74
Authored on Oct 15, 2013 by Paul E. McKenney

Merge branch 'gp.2013.09.25a' into HEAD

gp.2013.09.25a: Topic branch for grace-period updates.

Parents: 460aebac 15f5191b

Showing 4 changed files with 144 additions and 29 deletions (+144 / -29):

    include/trace/events/rcu.h    +66  -14
    kernel/rcutiny.c               +1   -1
    kernel/rcutree.c              +46  -10
    kernel/rcutree_plugin.h       +31   -4
include/trace/events/rcu.h

@@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization,
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)

 /*
- * Tracepoint for grace-period events: starting and ending a grace
- * period ("start" and "end", respectively), a CPU noting the start
- * of a new grace period or the end of an old grace period ("cpustart"
- * and "cpuend", respectively), a CPU passing through a quiescent
- * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
- * and "cpuofl", respectively), a CPU being kicked for being too
- * long in dyntick-idle mode ("kick"), a CPU accelerating its new
- * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
- * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
+ * Tracepoint for grace-period events.  Takes a string identifying the
+ * RCU flavor, the grace-period number, and a string identifying the
+ * grace-period-related event as follows:
+ *
+ *	"AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ *	"AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
+ *	"newreq": Request a new grace period.
+ *	"start": Start a grace period.
+ *	"cpustart": CPU first notices a grace-period start.
+ *	"cpuqs": CPU passes through a quiescent state.
+ *	"cpuonl": CPU comes online.
+ *	"cpuofl": CPU goes offline.
+ *	"reqwait": GP kthread sleeps waiting for grace-period request.
+ *	"reqwaitsig": GP kthread awakened by signal from reqwait state.
+ *	"fqswait": GP kthread waiting until time to force quiescent states.
+ *	"fqsstart": GP kthread starts forcing quiescent states.
+ *	"fqsend": GP kthread done forcing quiescent states.
+ *	"fqswaitsig": GP kthread awakened by signal from fqswait state.
+ *	"end": End a grace period.
+ *	"cpuend": CPU first notices a grace-period end.
  */
 TRACE_EVENT(rcu_grace_period,

@@ -160,6 +171,46 @@ TRACE_EVENT(rcu_grace_period_init,
 		  __entry->grplo, __entry->grphi, __entry->qsmask)
 );

+/*
+ * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended
+ * to assist debugging of these handoffs.
+ *
+ * The first argument is the name of the RCU flavor, and the second is
+ * the number of the offloaded CPU are extracted.  The third and final
+ * argument is a string as follows:
+ *
+ *	"WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ *	"WakeOvf": Wake rcuo kthread, CB list is huge.
+ *	"WakeNot": Don't wake rcuo kthread.
+ *	"WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ *	"Poll": Start of new polling cycle for rcu_nocb_poll.
+ *	"Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
+ *	"WokeEmpty": rcuo kthread woke to find empty list.
+ *	"WokeNonEmpty": rcuo kthread woke to find non-empty list.
+ *	"WaitQueue": Enqueue partially done, timed wait for it to complete.
+ *	"WokeQueue": Partial enqueue now complete.
+ */
+TRACE_EVENT(rcu_nocb_wake,
+
+	TP_PROTO(const char *rcuname, int cpu, const char *reason),
+
+	TP_ARGS(rcuname, cpu, reason),
+
+	TP_STRUCT__entry(
+		__field(const char *, rcuname)
+		__field(int, cpu)
+		__field(const char *, reason)
+	),
+
+	TP_fast_assign(
+		__entry->rcuname = rcuname;
+		__entry->cpu = cpu;
+		__entry->reason = reason;
+	),
+
+	TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
+);
+
 /*
  * Tracepoint for tasks blocking within preemptible-RCU read-side
  * critical sections.  Track the type of RCU (which one day might

@@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
 TRACE_EVENT(rcu_batch_end,

 	TP_PROTO(const char *rcuname, int callbacks_invoked,
-		 bool cb, bool nr, bool iit, bool risk),
+		 char cb, char nr, char iit, char risk),

 	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),

 	TP_STRUCT__entry(
 		__field(const char *, rcuname)
 		__field(int, callbacks_invoked)
-		__field(bool, cb)
-		__field(bool, nr)
-		__field(bool, iit)
-		__field(bool, risk)
+		__field(char, cb)
+		__field(char, nr)
+		__field(char, iit)
+		__field(char, risk)
 	),

 	TP_fast_assign(

@@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
 				      level, grplo, grphi, event) \
 				      do { } while (0)
+#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
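Throughout this merge, event strings are passed through TPS(); in this kernel series that wrapper is shorthand for tracepoint_string(), which interns the literal in a dedicated section so the tracer records a resolvable pointer rather than copying the string. As a minimal sketch (not itself part of this diff, and assuming the rcutree naming used in the hunks below), an emit site for the new tracepoint looks like:

    /* Sketch of a trace_rcu_nocb_wake() emit site; rdp/rsp are the usual
     * rcutree per-CPU and per-flavor structures, and TPS() is assumed to
     * expand to tracepoint_string(). */
    trace_rcu_nocb_wake(rdp->rsp->name,    /* RCU flavor, e.g. "rcu_sched" */
                        rdp->cpu,          /* offloaded CPU number */
                        TPS("WakeEmpty")); /* reason string from the list above */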
kernel/rcutiny.c

@@ -275,7 +275,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	if (&rcp->rcucblist == rcp->donetail) {
 		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
 		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
-					      ACCESS_ONCE(rcp->rcucblist),
+					      !!ACCESS_ONCE(rcp->rcucblist),
 					      need_resched(),
 					      is_idle_task(current),
 					      false));
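The added `!!` matters because of the rcu_batch_end change above: the `cb` argument is now a char rather than a bool. Converting a pointer to C's _Bool always yields 0 or 1, but converting it to char silently keeps only the low byte, which can be zero even for a non-NULL pointer. A minimal standalone illustration (userspace C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* A non-NULL pointer value whose low byte happens to be zero. */
            void *p = (void *)(uintptr_t)0x12345600;

            char truncated  = (char)(uintptr_t)p; /* keeps low byte only: 0 */
            char normalized = !!p;                /* logical test: 1 */

            printf("truncated=%d normalized=%d\n", truncated, normalized);
            return 0;
    }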
kernel/rcutree.c

@@ -1326,7 +1326,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 }

 /*
- * Initialize a new grace period.
+ * Initialize a new grace period.  Return 0 if no grace period required.
  */
 static int rcu_gp_init(struct rcu_state *rsp)
 {

@@ -1335,10 +1335,18 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
+	if (rsp->gp_flags == 0) {
+		/* Spurious wakeup, tell caller to go back to sleep.  */
+		raw_spin_unlock_irq(&rnp->lock);
+		return 0;
+	}
 	rsp->gp_flags = 0; /* Clear all flags: New grace period. */

-	if (rcu_gp_in_progress(rsp)) {
-		/* Grace period already in progress, don't start another.  */
+	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+		/*
+		 * Grace period already in progress, don't start another.
+		 * Not supposed to be able to happen.
+		 */
 		raw_spin_unlock_irq(&rnp->lock);
 		return 0;
 	}
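With this change, rcu_gp_init() decides under rnp->lock whether the wakeup was genuine: finding gp_flags already clear means a spurious wakeup, and the zero return sends the GP kthread back to sleep. That is what lets the @@ -1501 hunk below drop the caller's own flag recheck; condensed, the caller loop becomes the following sketch (names as in the hunks):

    for (;;) {
            wait_event_interruptible(rsp->gp_wq,
                                     ACCESS_ONCE(rsp->gp_flags) &
                                     RCU_GP_FLAG_INIT);
            if (rcu_gp_init(rsp))
                    break;              /* Real request; grace period begun. */
            cond_resched();
            flush_signals(current);     /* Spurious wakeup or stray signal. */
    }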
@@ -1481,8 +1489,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
-	if (cpu_needs_another_gp(rsp, rdp))
+	if (cpu_needs_another_gp(rsp, rdp)) {
 		rsp->gp_flags = RCU_GP_FLAG_INIT;
+		trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+				       TPS("newreq"));
+	}
 	raw_spin_unlock_irq(&rnp->lock);
 }

@@ -1492,6 +1504,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 static int __noreturn rcu_gp_kthread(void *arg)
 {
 	int fqs_state;
+	int gf;
 	unsigned long j;
 	int ret;
 	struct rcu_state *rsp = arg;

@@ -1501,14 +1514,19 @@ static int __noreturn rcu_gp_kthread(void *arg)
 		/* Handle grace-period start. */
 		for (;;) {
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("reqwait"));
 			wait_event_interruptible(rsp->gp_wq,
-						 rsp->gp_flags &
+						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
-			if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
-			    rcu_gp_init(rsp))
+			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
 			flush_signals(current);
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("reqwaitsig"));
 		}

 		/* Handle quiescent-state forcing. */

@@ -1518,10 +1536,16 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			j = HZ;
 			jiffies_till_first_fqs = HZ;
 		}
+		ret = 0;
 		for (;;) {
-			rsp->jiffies_force_qs = jiffies + j;
+			if (!ret)
+				rsp->jiffies_force_qs = jiffies + j;
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("fqswait"));
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+					((gf = ACCESS_ONCE(rsp->gp_flags)) &
+					 RCU_GP_FLAG_FQS) ||
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);

@@ -1530,13 +1554,23 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
-			if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
+			    (gf & RCU_GP_FLAG_FQS)) {
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsstart"));
 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsend"));
 				cond_resched();
 			} else {
 				/* Deal with stray signal. */
 				cond_resched();
 				flush_signals(current);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqswaitsig"));
 			}
 			j = jiffies_till_next_fqs;
 			if (j > HZ) {

@@ -1584,6 +1618,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
+	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+			       TPS("newreq"));

 	/*
 	 * We can't do wakeups while holding the rnp->lock, as that
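One subtlety in the @@ -1530 hunk: the "time to force quiescent states?" test changes from `ret == 0` (did the wait time out?) to an explicit comparison of jiffies against the recorded deadline. ULONG_CMP_GE is the kernel's wraparound-safe unsigned comparison, defined in include/linux/rcupdate.h of this era as `(ULONG_MAX / 2 >= (a) - (b))`. This standalone program (an illustration, not kernel code) shows why the modular form is needed for a free-running counter like jiffies:

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe "a >= b", valid while a and b stay within
     * ULONG_MAX/2 of each other; mirrors the kernel macro. */
    #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

    int main(void)
    {
            unsigned long deadline = ULONG_MAX - 5; /* just before wrap */
            unsigned long now = 10;                 /* counter has wrapped */

            printf("naive:     %d\n", now >= deadline);             /* 0: wrong */
            printf("wrap-safe: %d\n", ULONG_CMP_GE(now, deadline)); /* 1: right */
            return 0;
    }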
kernel/rcutree_plugin.h

@@ -2113,15 +2113,22 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = ACCESS_ONCE(rdp->nocb_kthread);
-	if (rcu_nocb_poll || !t)
+	if (rcu_nocb_poll || !t) {
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				    TPS("WakeNotPoll"));
 		return;
+	}
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
 		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
 		rdp->qlen_last_fqs_check = 0;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		wake_up_process(t); /* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
+	} else {
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
 	}
 	return;
 }

@@ -2145,10 +2152,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
 					 (unsigned long)rhp->func,
-					 rdp->qlen_lazy, rdp->qlen);
+					 -atomic_long_read(&rdp->nocb_q_count_lazy),
+					 -atomic_long_read(&rdp->nocb_q_count));
 	else
 		trace_rcu_callback(rdp->rsp->name, rhp,
-				   rdp->qlen_lazy, rdp->qlen);
+				   -atomic_long_read(&rdp->nocb_q_count_lazy),
+				   -atomic_long_read(&rdp->nocb_q_count));
 	return 1;
 }

@@ -2226,6 +2235,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 static int rcu_nocb_kthread(void *arg)
 {
 	int c, cl;
+	bool firsttime = 1;
 	struct rcu_head *list;
 	struct rcu_head *next;
 	struct rcu_head **tail;

@@ -2234,14 +2244,27 @@ static int rcu_nocb_kthread(void *arg)
 	/* Each pass through this loop invokes one batch of callbacks */
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
-		if (!rcu_nocb_poll)
+		if (!rcu_nocb_poll) {
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+		} else if (firsttime) {
+			firsttime = 0;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Poll"));
+		}
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
+			if (!rcu_nocb_poll)
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WokeEmpty"));
 			schedule_timeout_interruptible(1);
 			flush_signals(current);
 			continue;
 		}
+		firsttime = 1;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				    TPS("WokeNonEmpty"));

 		/*
 		 * Extract queued callbacks, update counts, and wait

@@ -2262,7 +2285,11 @@ static int rcu_nocb_kthread(void *arg)
 			next = list->next;
 			/* Wait for enqueuing to complete, if needed. */
 			while (next == NULL && &list->next != tail) {
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WaitQueue"));
 				schedule_timeout_interruptible(1);
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WokeQueue"));
 				next = list->next;
 			}
 			debug_rcu_head_unqueue(list);
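For reference on consuming these events: with CONFIG_RCU_TRACE enabled, the rcu event group can be toggled at runtime through the tracing filesystem (typically under /sys/kernel/debug/tracing/events/rcu/), and each firing is rendered through its TP_printk format. Given rcu_nocb_wake's "%s %d %s" format above, the payload of a captured line would look roughly like this (flavor, CPU, and reason invented for illustration):

    rcu_nocb_wake: rcu_sched 3 WakeEmpty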