Commit 8bc6782f
Author: Ingo Molnar <mingo@kernel.org>
Date:   March 15, 2016
Parents: e23604ed, 3500efae

    Merge commit 'fixes.2015.02.23a' into core/rcu

    Conflicts:
        kernel/rcu/tree.c

    Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 11 changed files with 159 additions and 128 deletions (+159, -128)
 include/linux/compiler.h |  +8  -4
 include/linux/irq.h      |  +4  -2
 include/linux/rcupdate.h |  +0  -2
 include/linux/srcu.h     | +17  -2
 kernel/irq/internals.h   |  +4  -0
 kernel/rcu/rcutorture.c  |  +8  -6
 kernel/rcu/tiny_plugin.h |  +2 -13
 kernel/rcu/tree.c        | +70 -73
 kernel/rcu/tree.h        | +31 -11
 kernel/rcu/tree_plugin.h | +13 -14
 scripts/checkpatch.pl    |  +2  -1
include/linux/compiler.h

@@ -20,12 +20,14 @@
 # define __pmem        __attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu         __attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
 # define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private     __attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
 # define __user
 # define __kernel
 # define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __percpu
 # define __rcu
 # define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */

 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b

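The pair added here works as follows: a member marked __private triggers a sparse warning (make C=1) on any direct access, while ACCESS_PRIVATE() casts the noderef attribute away with __force under __CHECKER__ and reduces to a plain member access in normal builds. A minimal sketch of the intended usage, with struct foo and foo_read_counter() being illustrative names rather than anything from this commit:

        #include <linux/compiler.h>

        struct foo {
                int __private counter;          /* direct f->counter use is flagged by sparse */
        };

        static inline int foo_read_counter(struct foo *f)
        {
                return ACCESS_PRIVATE(f, counter);      /* the sanctioned access path */
        }

The generated code is identical to a plain field access; only the static checker sees the difference, which is what lets the rcu_node ->lock conversion later in this merge stay zero-cost.
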
include/linux/irq.h

@@ -137,7 +137,7 @@ struct irq_domain;
  * @msi_desc:          MSI descriptor
  */
 struct irq_common_data {
-       unsigned int            state_use_accessors;
+       unsigned int            __private state_use_accessors;
 #ifdef CONFIG_NUMA
        unsigned int            node;
 #endif
@@ -208,7 +208,7 @@ enum {
        IRQD_FORWARDED_TO_VCPU          = (1 << 20),
 };

-#define __irqd_to_state(d)             ((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
@@ -299,6 +299,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
        __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }

+#undef __irqd_to_state
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
        return d->hwirq;

include/linux/rcupdate.h

@@ -360,8 +360,6 @@ void rcu_user_exit(void);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
-                                        struct task_struct *next) { }
 #endif /* CONFIG_NO_HZ_FULL */

 #ifdef CONFIG_RCU_NOCB_CPU

include/linux/srcu.h

@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
 }

 /*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique.  These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function.  If these rules are too
+ * restrictive, declare the srcu_struct manually.  For example, in
+ * each file:
+ *
+ *     static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ *     init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)                                 \
        static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\

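The comment added above recommends a manual alternative when DEFINE_STATIC_SRCU()'s global-uniqueness and not-inside-a-function rules are too restrictive. A minimal sketch of that alternative, where my_srcu comes from the comment and my_srcu_setup()/my_reader() are illustrative names:

        #include <linux/srcu.h>

        static struct srcu_struct my_srcu;

        static int __init my_srcu_setup(void)
        {
                return init_srcu_struct(&my_srcu);      /* must run before the first use */
        }

        static void my_reader(void)
        {
                int idx;

                idx = srcu_read_lock(&my_srcu);
                /* ... dereference SRCU-protected pointers here ... */
                srcu_read_unlock(&my_srcu, idx);
        }

Unlike the DEFINE_STATIC_SRCU() form, this variant also needs a matching cleanup_srcu_struct(&my_srcu) if the structure can ever go away.
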
kernel/irq/internals.h

@@ -160,6 +160,8 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
        __irq_put_desc_unlock(desc, flags, false);
 }

+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -188,6 +190,8 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
        return __irqd_to_state(d) & mask;
 }

+#undef __irqd_to_state
+
 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
        __this_cpu_inc(*desc->kstat_irqs);

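Together with the include/linux/irq.h hunks, this gives __irqd_to_state() a deliberately narrow lifetime: it is defined, used by the irqd_*() accessors, and #undef'd again, so nothing outside those two headers can reach the now-__private state_use_accessors word. A stripped-down sketch of that define/#undef scoping idiom, using made-up names (struct common, __to_state(), get_state()):

        #include <linux/compiler.h>

        struct common {
                unsigned int __private state;
        };

        /* Visible only between this #define and the #undef below. */
        #define __to_state(c)   ACCESS_PRIVATE(c, state)

        static inline unsigned int get_state(struct common *c)
        {
                return __to_state(c);
        }

        #undef __to_state       /* later code must go through get_state() */
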
kernel/rcu/rcutorture.c

@@ -932,12 +932,14 @@ rcu_torture_writer(void *arg)
        int nsynctypes = 0;

        VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-       pr_alert("%s" TORTURE_FLAG
-                " Grace periods expedited from boot/sysfs for %s,\n",
-                torture_type, cur_ops->name);
-       pr_alert("%s" TORTURE_FLAG
-                " Testing of dynamic grace-period expediting diabled.\n",
-                torture_type);
+       if (!can_expedite) {
+               pr_alert("%s" TORTURE_FLAG
+                        " Grace periods expedited from boot/sysfs for %s,\n",
+                        torture_type, cur_ops->name);
+               pr_alert("%s" TORTURE_FLAG
+                        " Disabled dynamic grace-period expediting.\n",
+                        torture_type);
+       }

        /* Initialize synctype[] array.  If none set, take default. */
        if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)

kernel/rcu/tiny_plugin.h

@@ -23,7 +23,7 @@
  */

 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -122,18 +122,7 @@ static int __init rcutiny_trace_init(void)
        debugfs_remove_recursive(rcudir);
        return 1;
 }
-
-static void __exit rcutiny_trace_cleanup(void)
-{
-       debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutiny_trace_init);
-module_exit(rcutiny_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutiny_trace_init);

 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 {

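The tiny_plugin.h change is the usual explicitly-non-modular conversion: this code can only ever be built in, so module.h gives way to init.h, the MODULE_* tags and the unreachable __exit path are dropped, and module_init() becomes device_initcall(), which is what module_init() expands to for built-in code anyway. A minimal sketch of the same conversion on an invented init function my_trace_init():

        #include <linux/init.h>         /* was <linux/module.h> */

        static int __init my_trace_init(void)
        {
                /* set up debugfs files, etc. */
                return 0;
        }
        device_initcall(my_trace_init); /* was module_init(my_trace_init); no module_exit() needed */
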
kernel/rcu/tree.c

@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);

 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
        if ((rdp->dynticks_snap & 0x1) == 0) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-               return 1;
-       } else {
                if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
                        WRITE_ONCE(rdp->gpwrap, true);
-               return 0;
+               return 1;
        }
+       return 0;
 }

 /*
@@ -1173,15 +1171,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
                        WRITE_ONCE(*rcrmp,
                                   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-                       /* Time to beat on that CPU again! */
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
                }
+               rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
        }

+       /* And if it has been a really long time, kick the CPU as well. */
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+           ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+               resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
        return 0;
 }

@@ -1246,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
                        if (rnp->qsmask & (1UL << cpu))
                                dump_cpu_task(rnp->grplo + cpu);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }

@@ -1266,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rsp->jiffies_stall,
                   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /*
         * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                                ndetected++;
                        }
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
        }
 unlock_out:
        if (rnp != rnp_root)
-               raw_spin_unlock(&rnp_root->lock);
+               raw_spin_unlock_rcu_node(rnp_root);
 out:
        if (c_out != NULL)
                *c_out = c;
@@ -1814,7 +1813,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
                return;
        }
        needwake = __note_gp_changes(rsp, rnp, rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        raw_spin_lock_irq_rcu_node(rnp);
        if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
        WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * Grace period already in progress, don't start another.
                 * Not supposed to be able to happen.
                 */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
@@ -1858,7 +1857,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        /* Record GP times before starting GP, hence smp_store_release(). */
        smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);

        /*
         * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
-                       raw_spin_unlock_irq(&rnp->lock);
+                       raw_spin_unlock_irq_rcu_node(rnp);
                        continue;
                }
@@ -1906,7 +1905,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                                rcu_cleanup_dead_rnp(rnp);
                }

-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }

        /*
@@ -1937,7 +1936,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
@@ -1995,7 +1994,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
                raw_spin_lock_irq_rcu_node(rnp);
                WRITE_ONCE(rsp->gp_flags,
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }
 }
@@ -2025,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         * safe for us to drop the lock in order to mark the grace
         * period as completed in all of the rcu_node structures.
         */
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);

        /*
         * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                /* smp_mb() provided by prior unlock-lock pair. */
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                sq = rcu_nocb_gp_get(rnp);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                rcu_nocb_gp_cleanup(sq);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                                       READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 }

 /*
@@ -2236,18 +2235,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }

 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
        swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
@@ -2277,7 +2278,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                         * Our bit has already been cleared, or the
                         * relevant grace period is already over, so done.
                         */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                        /* Other bits still set at this level, so done. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        break;
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
@@ -2348,19 +2349,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
        gps = rnp->gpnum;
        mask = rnp->grpmask;
-       raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }

 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
-               rdp->core_needs_qs = 0;
+               rdp->core_needs_qs = false;

                /*
                 * This GP can't end until cpu checks in, so all of our
@@ -2601,10 +2597,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                rnp->qsmaskinit &= ~mask;
                rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       raw_spin_unlock_rcu_node(rnp);
+                       /* irqs remain disabled. */
                        return;
                }
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
        }
 }
@@ -2627,7 +2624,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        mask = rdp->grpmask;
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
        rnp->qsmaskinitnext &= ~mask;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -2861,7 +2858,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
        }
 }
@@ -2897,11 +2894,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
        raw_spin_unlock(&rnp_old->fqslock);
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
-               raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
                return;  /* Someone beat us to it. */
        }
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
        swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
@@ -2927,7 +2924,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (cpu_needs_another_gp(rsp, rdp)) {
                raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
                needwake = rcu_start_gp(rsp);
-               raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        } else {
@@ -3018,7 +3015,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                        raw_spin_lock_rcu_node(rnp_root);
                        needwake = rcu_start_gp(rsp);
-                       raw_spin_unlock(&rnp_root->lock);
+                       raw_spin_unlock_rcu_node(rnp_root);
                        if (needwake)
                                rcu_gp_kthread_wake(rsp);
                } else {
@@ -3438,14 +3435,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
@@ -3460,7 +3457,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
-                       raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
@@ -3483,7 +3480,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
@@ -3524,11 +3521,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up(&rsp->expedited_wq);
@@ -3536,7 +3533,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        break;
                }
                mask = rnp->grpmask;
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3568,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
@@ -3732,7 +3729,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                 */
                if (rcu_preempt_has_tasks(rnp))
                        rnp->exp_tasks = rnp->blkd_tasks.next;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* IPI the remaining CPUs for expedited quiescent state. */
                mask = 1;
@@ -3749,7 +3746,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        if (cpu_online(cpu) &&
                            (rnp->expmask & mask)) {
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                                schedule_timeout_uninterruptible(1);
                                if (cpu_online(cpu) &&
                                    (rnp->expmask & mask))
@@ -3758,7 +3755,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                        }
                        if (!(rnp->expmask & mask))
                                mask_ofl_ipi &= ~mask;
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4162,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
                rnp->qsmaskinit |= mask;
-               raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
        }
 }
@@ -4189,7 +4186,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rsp = rsp;
        mutex_init(&rdp->exp_funnel_mutex);
        rcu_boot_init_nocb_percpu_data(rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -4217,7 +4214,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rcu_sysidle_init_percpu_data(rdp->dynticks);
        atomic_set(&rdp->dynticks->dynticks,
                   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-       raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */

        /*
         * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4238,7 +4235,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 static void rcu_prepare_cpu(int cpu)
@@ -4360,7 +4357,7 @@ static int __init rcu_spawn_gp_kthread(void)
                        sp.sched_priority = kthread_prio;
                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                wake_up_process(t);
        }
        rcu_spawn_nocb_kthreads();
@@ -4451,8 +4448,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                cpustride *= levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < levelcnt[i]; j++, rnp++) {
-                       raw_spin_lock_init(&rnp->lock);
-                       lockdep_set_class_and_name(&rnp->lock,
+                       raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+                       lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
                                                   &rcu_node_class[i], buf[i]);
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,

kernel/rcu/tree.h

@@ -150,8 +150,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-       raw_spinlock_t lock;    /* Root rcu_node's lock protects some */
-                               /*  rcu_state fields as well as following. */
+       raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
+                                       /*  some rcu_state fields as well as */
+                                       /*  following. */
        unsigned long gpnum;    /* Current grace period for this node. */
                                /*  This will either be equal to or one */
                                /*  behind the root rcu_node's gpnum. */
@@ -682,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */

 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -691,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock(&rnp->lock);
+       raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }

+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock_irq(&rnp->lock);
+       raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }

-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)     \
-do {                                                   \
-       typecheck(unsigned long, flags);                \
-       raw_spin_lock_irqsave(&(rnp)->lock, flags);     \
-       smp_mb__after_unlock_lock();                    \
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)                     \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);       \
+       smp_mb__after_unlock_lock();                                    \
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)                \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);  \
 } while (0)

 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-       bool locked = raw_spin_trylock(&rnp->lock);
+       bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

        if (locked)
                smp_mb__after_unlock_lock();

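These wrappers are what the kernel/rcu/tree.c and tree_plugin.h hunks switch every call site to: the lock side adds smp_mb__after_unlock_lock() so that an unlock on one rcu_node followed by a lock on another still provides full ordering across tree levels, and the ACCESS_PRIVATE() indirection keeps sparse happy now that ->lock is __private. A small sketch of a typical caller, mirroring the force_quiescent_state() pattern in the tree.c diff (example_set_fqs() itself is an invented name):

        static void example_set_fqs(struct rcu_state *rsp, struct rcu_node *rnp_root)
        {
                unsigned long flags;

                raw_spin_lock_irqsave_rcu_node(rnp_root, flags); /* lock + smp_mb__after_unlock_lock() */
                WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
                raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
        }
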
kernel/rcu/tree_plugin.h

@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
                        rnp->gp_tasks = &t->rcu_node_entry;
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
-       raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+       raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

        /*
         * Report the quiescent state for the expedited GP.  This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                                                !!rnp->gp_tasks);
                rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
        } else {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        /* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -807,7 +807,6 @@ void exit_rcu(void)
 #else /* #ifdef CONFIG_PREEMPT_RCU */

 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;

 /*
  * Tell them what RCU they are running.
@@ -991,7 +990,7 @@ static int rcu_boost(struct rcu_node *rnp)
         * might exit their RCU read-side critical sections on their own.
         */
        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
        }
@@ -1028,7 +1027,7 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1088,7 +1087,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                rnp->n_balk_exp_gp_tasks++;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        if (rnp->exp_tasks != NULL ||
@@ -1098,13 +1097,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                t = rnp->boost_kthread_task;
                if (t)
                        rcu_wake_cond(t, rnp->boost_kthread_status);
        } else {
                rcu_initiate_boost_trace(rnp);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
@@ -1172,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                return PTR_ERR(t);
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1308,7 +1307,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 static void invoke_rcu_callbacks_kthread(void)
@@ -1559,7 +1558,7 @@ static void rcu_prepare_for_idle(void)
                rnp = rdp->mynode;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        }
@@ -2064,7 +2063,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        needwake = rcu_start_future_gp(rnp, rdp, &c);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rdp->rsp);

scripts/checkpatch.pl

@@ -269,7 +269,8 @@ our $Sparse = qr{
                        __init_refok|
                        __kprobes|
                        __ref|
-                       __rcu
+                       __rcu|
+                       __private
                }x;
 our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
 our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};