Commit 7d0ae808 authored by Paul E. McKenney

rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()

This commit moves from the old ACCESS_ONCE() API to the new READ_ONCE()
and WRITE_ONCE() APIs.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Updated to include kernel/torture.c as suggested by Jason Low. ]
Parent 030bbdbf
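The conversion is mechanical, but the semantics matter: ACCESS_ONCE() was a single volatile cast used for both loads and stores, and compilers were known to mishandle it for non-scalar types. READ_ONCE() and WRITE_ONCE() make the direction of each access explicit. A minimal userspace sketch of the scalar forms (the kernel's real macros also handle non-scalar sizes via byte-wise copies; names here are illustrative):

```c
#include <stdio.h>

/* Userspace sketch of the scalar forms only; the kernel versions also
 * handle non-scalar types, which the old ACCESS_ONCE() cast could not. */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int shared_flag;

int main(void)
{
	/* Old style:  ACCESS_ONCE(shared_flag) = 1;
	 *             v = ACCESS_ONCE(shared_flag);
	 * The new API keeps loads and stores syntactically distinct. */
	WRITE_ONCE(shared_flag, 1);
	int v = READ_ONCE(shared_flag);

	printf("flag = %d\n", v);
	return 0;
}
```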
@@ -29,8 +29,8 @@
  */
 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 {
-	ACCESS_ONCE(list->next) = list;
-	ACCESS_ONCE(list->prev) = list;
+	WRITE_ONCE(list->next, list);
+	WRITE_ONCE(list->prev, list);
 }
 
 /*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
 #define list_first_or_null_rcu(ptr, type, member) \
 ({ \
 	struct list_head *__ptr = (ptr); \
-	struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+	struct list_head *__next = READ_ONCE(__ptr->next); \
 	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
 })
......
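For context, list_first_or_null_rcu() above depends on loading the head's ->next exactly once: a second load could observe a concurrently emptied list and hand back a stale entry. A userspace sketch of that single-snapshot logic, with struct and helper names simplified:

```c
/* Sketch of the list_first_or_null_rcu() logic: the head's ->next is
 * loaded exactly once, so the emptiness check and the returned node
 * refer to the same snapshot even if the list changes concurrently. */
struct list_head {
	struct list_head *next, *prev;
};

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static struct list_head *first_or_null(struct list_head *head)
{
	struct list_head *next = READ_ONCE(head->next);	/* one load */

	return (head != next) ? next : NULL;	/* NULL when list is empty */
}
```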
@@ -364,8 +364,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
-			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+		if (READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
@@ -609,7 +609,7 @@ static inline void rcu_preempt_sleep_check(void)
 #define __rcu_access_pointer(p, space) \
 ({ \
-	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(_________p1)); \
 })
@@ -630,7 +630,7 @@ static inline void rcu_preempt_sleep_check(void)
 #define __rcu_access_index(p, space) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	typeof(p) _________p1 = READ_ONCE(p); \
 	rcu_dereference_sparse(p, space); \
 	(_________p1); \
 })
@@ -659,7 +659,7 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define lockless_dereference(p) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	typeof(p) _________p1 = READ_ONCE(p); \
 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 	(_________p1); \
 })
@@ -702,7 +702,7 @@ static inline void rcu_preempt_sleep_check(void)
  * @p: The pointer to read
  *
 * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL. Although rcu_access_pointer() may also be used in cases where
@@ -791,7 +791,7 @@ static inline void rcu_preempt_sleep_check(void)
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1. Although rcu_access_index() may also be used in cases where
@@ -827,7 +827,7 @@ static inline void rcu_preempt_sleep_check(void)
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing. Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
......
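The lockless_dereference() macro touched above pairs the one-time load with smp_read_barrier_depends() so that, even on DEC Alpha, loads through the fetched pointer cannot be reordered before the pointer load itself. A self-contained sketch of the pattern, with the macros reimplemented for userspace (the barrier really is a no-op everywhere but Alpha):

```c
#include <stddef.h>

/* Userspace reimplementations, for illustration only. */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define smp_read_barrier_depends()	do { } while (0)

#define lockless_dereference(p) \
({ \
	__typeof__(p) _p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_p1); \
})

struct foo {
	int a;
};

static struct foo *gp;		/* published by some writer */

static int reader(void)
{
	struct foo *p = lockless_dereference(gp);

	/* The data dependency on p orders the load of gp before p->a. */
	return p ? p->a : -1;
}
```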
@@ -1413,7 +1413,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 	do {
 		wait_event(barrier_cbs_wq[myid],
 			   (newphase =
-			    ACCESS_ONCE(barrier_phase)) != lastphase ||
+			    READ_ONCE(barrier_phase)) != lastphase ||
 			   torture_must_stop());
 		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
......
@@ -151,7 +151,7 @@ static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
 		sum += t;
 	}
 	return sum;
@@ -168,7 +168,7 @@ static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
 		sum += t;
 	}
 	return sum;
@@ -265,8 +265,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
 	}
 	return sum;
 }
@@ -296,7 +296,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
-	idx = ACCESS_ONCE(sp->completed) & 0x1;
+	idx = READ_ONCE(sp->completed) & 0x1;
 	preempt_disable();
 	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
......
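The srcu_readers_*_idx() functions above sum per-CPU counters that readers update concurrently; each element is sampled with READ_ONCE() so the compiler cannot re-load or fuse the accesses mid-sum. A minimal sketch, with a flat two-column array standing in for the real per-CPU machinery:

```c
/* Sketch of srcu_readers_active_idx(): sum one flip-index column of
 * per-CPU reader counts, loading each element exactly once. NR_CPUS
 * and the array are illustrative stand-ins. */
#define NR_CPUS		4
#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static unsigned long per_cpu_c[NR_CPUS][2];

static unsigned long readers_active_idx(int idx)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += READ_ONCE(per_cpu_c[cpu][idx]);
	return sum;
}
```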
@@ -144,16 +144,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 		return;
 	rcp->ticks_this_gp++;
 	j = jiffies;
-	js = ACCESS_ONCE(rcp->jiffies_stall);
+	js = READ_ONCE(rcp->jiffies_stall);
 	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
 		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
 		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
 		       jiffies - rcp->gp_start, rcp->qlen);
 		dump_stack();
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
-			3 * rcu_jiffies_till_stall_check() + 3;
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	} else if (ULONG_CMP_GE(j, js)) {
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + rcu_jiffies_till_stall_check());
 	}
 }
@@ -161,7 +162,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
 {
 	rcp->ticks_this_gp = 0;
 	rcp->gp_start = jiffies;
-	ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+	WRITE_ONCE(rcp->jiffies_stall,
+		   jiffies + rcu_jiffies_till_stall_check());
 }
 
 static void check_cpu_stalls(void)
......
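The stall checks above compare jiffies values with ULONG_CMP_GE(), which stays correct across counter wrap by treating the unsigned difference as a distance. The definition below matches the kernel's in include/linux/rcupdate.h; the test values are illustrative:

```c
#include <assert.h>
#include <limits.h>

/* Wrap-safe comparison as used by check_cpu_stall(). */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long js = ULONG_MAX - 10;	/* deadline set just before wrap */
	unsigned long j = 5;			/* "now", after jiffies wrapped */

	assert(ULONG_CMP_GE(j, js));	/* deadline passed despite the wrap */
	return 0;
}
```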
This diff is collapsed.
@@ -570,7 +570,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
 	return !rcu_preempted_readers_exp(rnp) &&
-	       ACCESS_ONCE(rnp->expmask) == 0;
+	       READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
@@ -716,7 +716,7 @@ void synchronize_rcu_expedited(void)
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
+	snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
 	/*
@@ -740,7 +740,7 @@ void synchronize_rcu_expedited(void)
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 		if (ULONG_CMP_LT(snap,
-			 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+			 READ_ONCE(sync_rcu_preempt_exp_count))) {
 			put_online_cpus();
 			goto mb_ret; /* Others did our work for us. */
 		}
@@ -752,7 +752,7 @@ void synchronize_rcu_expedited(void)
 			return;
 		}
 	}
-	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
 		put_online_cpus();
 		goto unlock_mb_ret; /* Others did our work for us. */
 	}
@@ -780,8 +780,7 @@ void synchronize_rcu_expedited(void)
 	/* Clean up and exit. */
 	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	ACCESS_ONCE(sync_rcu_preempt_exp_count) =
-		sync_rcu_preempt_exp_count + 1;
+	WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
 unlock_mb_ret:
 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -994,8 +993,8 @@ static int rcu_boost(struct rcu_node *rnp)
 	struct task_struct *t;
 	struct list_head *tb;
 
-	if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
-	    ACCESS_ONCE(rnp->boost_tasks) == NULL)
+	if (READ_ONCE(rnp->exp_tasks) == NULL &&
+	    READ_ONCE(rnp->boost_tasks) == NULL)
 		return 0;  /* Nothing left to boost. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1048,8 +1047,8 @@ static int rcu_boost(struct rcu_node *rnp)
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
-	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
+	return READ_ONCE(rnp->exp_tasks) != NULL ||
+	       READ_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1462,7 +1461,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 	 * callbacks not yet ready to invoke.
 	 */
 	if ((rdp->completed != rnp->completed ||
-	     unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
+	     unlikely(READ_ONCE(rdp->gpwrap))) &&
 	    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
 		note_gp_changes(rsp, rdp);
@@ -1534,7 +1533,7 @@ static void rcu_prepare_for_idle(void)
 	int tne;
 
 	/* Handle nohz enablement switches conservatively. */
-	tne = ACCESS_ONCE(tick_nohz_active);
+	tne = READ_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
 		if (rcu_cpu_has_callbacks(NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
@@ -1760,7 +1759,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	       atomic_read(&rdtp->dynticks) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-	       ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
 	       fast_no_hz);
 }
@@ -1898,11 +1897,11 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 {
 	struct rcu_data *rdp_leader = rdp->nocb_leader;
 
-	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
+	if (!READ_ONCE(rdp_leader->nocb_kthread))
 		return;
-	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
-		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
 		wake_up(&rdp_leader->nocb_wq);
 	}
 }
@@ -1934,14 +1933,14 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 	ret = atomic_long_read(&rdp->nocb_q_count);
 
 #ifdef CONFIG_PROVE_RCU
-	rhp = ACCESS_ONCE(rdp->nocb_head);
+	rhp = READ_ONCE(rdp->nocb_head);
 	if (!rhp)
-		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+		rhp = READ_ONCE(rdp->nocb_gp_head);
 	if (!rhp)
-		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+		rhp = READ_ONCE(rdp->nocb_follower_head);
 
 	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
-	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
 	    rcu_scheduler_fully_active) {
 		/* RCU callback enqueued before CPU first came online??? */
 		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
@@ -1975,12 +1974,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	atomic_long_add(rhcount, &rdp->nocb_q_count);
 	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
-	ACCESS_ONCE(*old_rhpp) = rhp;
+	WRITE_ONCE(*old_rhpp, rhp);
 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
 	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
 	/* If we are not being polled and there is a kthread, awaken it ... */
-	t = ACCESS_ONCE(rdp->nocb_kthread);
+	t = READ_ONCE(rdp->nocb_kthread);
 	if (rcu_nocb_poll || !t) {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 				    TPS("WakeNotPoll"));
@@ -2118,7 +2117,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	for (;;) {
 		wait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
-			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
 		if (likely(d))
 			break;
 		WARN_ON(signal_pending(current));
@@ -2145,7 +2144,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
 		wait_event_interruptible(my_rdp->nocb_wq,
-				!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
+				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		/* Memory barrier handled by smp_mb() calls below and repoll. */
 	} else if (firsttime) {
 		firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2159,12 +2158,12 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 	 */
 	gotcbs = false;
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-		rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
+		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
 		if (!rdp->nocb_gp_head)
 			continue;  /* No CBs here, try next follower. */
 
 		/* Move callbacks to wait-for-GP list, which is empty. */
-		ACCESS_ONCE(rdp->nocb_head) = NULL;
+		WRITE_ONCE(rdp->nocb_head, NULL);
 		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
 		gotcbs = true;
 	}
@@ -2184,7 +2183,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 		my_rdp->nocb_leader_sleep = true;
 		smp_mb();  /* Ensure _sleep true before scan. */
 		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
-			if (ACCESS_ONCE(rdp->nocb_head)) {
+			if (READ_ONCE(rdp->nocb_head)) {
 				/* Found CB, so short-circuit next wait. */
 				my_rdp->nocb_leader_sleep = false;
 				break;
@@ -2205,7 +2204,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 	/* Each pass through the following loop wakes a follower, if needed. */
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-		if (ACCESS_ONCE(rdp->nocb_head))
+		if (READ_ONCE(rdp->nocb_head))
 			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
 		if (!rdp->nocb_gp_head)
 			continue; /* No CBs, so no need to wake follower. */
@@ -2241,7 +2240,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    "FollowerSleep");
 			wait_event_interruptible(rdp->nocb_wq,
-						 ACCESS_ONCE(rdp->nocb_follower_head));
+						 READ_ONCE(rdp->nocb_follower_head));
 		} else if (firsttime) {
 			/* Don't drown trace log with "Poll"! */
 			firsttime = false;
@@ -2282,10 +2281,10 @@ static int rcu_nocb_kthread(void *arg)
 			nocb_follower_wait(rdp);
 
 		/* Pull the ready-to-invoke callbacks onto local list. */
-		list = ACCESS_ONCE(rdp->nocb_follower_head);
+		list = READ_ONCE(rdp->nocb_follower_head);
 		BUG_ON(!list);
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
-		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
+		WRITE_ONCE(rdp->nocb_follower_head, NULL);
 		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
 
 		/* Each pass through the following loop invokes a callback. */
@@ -2324,7 +2323,7 @@ static int rcu_nocb_kthread(void *arg)
 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 {
-	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+	return READ_ONCE(rdp->nocb_defer_wakeup);
 }
 
 /* Do a deferred wakeup of rcu_nocb_kthread(). */
@@ -2334,8 +2333,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 	if (!rcu_nocb_need_deferred_wakeup(rdp))
 		return;
-	ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
-	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
 	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
@@ -2448,7 +2447,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
 			"rcuo%c/%d", rsp->abbr, cpu);
 	BUG_ON(IS_ERR(t));
-	ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
 }
 
 /*
@@ -2663,7 +2662,7 @@ static void rcu_sysidle_enter(int irq)
 
 		/* Record start of fully idle period. */
 		j = jiffies;
-		ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+		WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
 		smp_mb__before_atomic();
 		atomic_inc(&rdtp->dynticks_idle);
 		smp_mb__after_atomic();
@@ -2681,7 +2680,7 @@ static void rcu_sysidle_enter(int irq)
  */
 void rcu_sysidle_force_exit(void)
 {
-	int oldstate = ACCESS_ONCE(full_sysidle_state);
+	int oldstate = READ_ONCE(full_sysidle_state);
 	int newoldstate;
 
 	/*
@@ -2794,7 +2793,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 	smp_mb(); /* Read counters before timestamps. */
 
 	/* Pick up timestamps. */
-	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
+	j = READ_ONCE(rdtp->dynticks_idle_jiffies);
 	/* If this CPU entered idle more recently, update maxj timestamp. */
 	if (ULONG_CMP_LT(*maxj, j))
 		*maxj = j;
@@ -2831,11 +2830,11 @@ static unsigned long rcu_sysidle_delay(void)
 static void rcu_sysidle(unsigned long j)
 {
 	/* Check the current state. */
-	switch (ACCESS_ONCE(full_sysidle_state)) {
+	switch (READ_ONCE(full_sysidle_state)) {
 	case RCU_SYSIDLE_NOT:
 
 		/* First time all are idle, so note a short idle period. */
-		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
 		break;
 
 	case RCU_SYSIDLE_SHORT:
@@ -2873,7 +2872,7 @@ static void rcu_sysidle_cancel(void)
 {
 	smp_mb();
 	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
-		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
 }
 
 /*
@@ -2925,7 +2924,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
 	smp_mb();  /* grace period precedes setting inuse. */
 
 	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
-	ACCESS_ONCE(rshp->inuse) = 0;
+	WRITE_ONCE(rshp->inuse, 0);
 }
 
 /*
@@ -2936,7 +2935,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
 bool rcu_sys_is_idle(void)
 {
 	static struct rcu_sysidle_head rsh;
-	int rss = ACCESS_ONCE(full_sysidle_state);
+	int rss = READ_ONCE(full_sysidle_state);
 
 	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
 		return false;
@@ -2964,7 +2963,7 @@ bool rcu_sys_is_idle(void)
 		}
 		rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
 		oldrss = rss;
-		rss = ACCESS_ONCE(full_sysidle_state);
+		rss = READ_ONCE(full_sysidle_state);
 	}
 }
@@ -3048,7 +3047,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_cpu(smp_processor_id()) &&
 	    (!rcu_gp_in_progress(rsp) ||
-	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
+	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
 		return 1;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 	return 0;
@@ -3077,7 +3076,7 @@ static void rcu_bind_gp_kthread(void)
 static void rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
@@ -3085,6 +3084,6 @@ static void rcu_dynticks_task_enter(void)
 static void rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
......
@@ -277,7 +277,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 	seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
 		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
+		   READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
 	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
 		if (rnp->level != level) {
 			seq_puts(m, "\n");
@@ -323,8 +323,8 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
 	struct rcu_node *rnp = &rsp->node[0];
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	completed = ACCESS_ONCE(rsp->completed);
-	gpnum = ACCESS_ONCE(rsp->gpnum);
+	completed = READ_ONCE(rsp->completed);
+	gpnum = READ_ONCE(rsp->gpnum);
 	if (completed == gpnum)
 		gpage = 0;
 	else
......
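Among the hunks above, do_nocb_deferred_wakeup() shows a recurring sample-then-clear idiom: the deferred-wakeup flag may be stored to by a timer handler at any time, so it is read and reset with the _ONCE() accessors. A userspace sketch of just that handshake (enum values and names are illustrative):

```c
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

enum { WAKE_NOT, WAKE, WAKE_FORCE };

static int nocb_defer_wakeup;

static void do_deferred_wakeup(void)
{
	int ndw;

	if (!READ_ONCE(nocb_defer_wakeup))
		return;				/* nothing was deferred */
	ndw = READ_ONCE(nocb_defer_wakeup);	/* sample the request... */
	WRITE_ONCE(nocb_defer_wakeup, WAKE_NOT);	/* ...then consume it */
	/* ...wake the leader here, forcing it if ndw == WAKE_FORCE. */
	(void)ndw;
}
```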
@@ -150,14 +150,14 @@ void __rcu_read_unlock(void)
 		barrier();  /* critical section before exit code. */
 		t->rcu_read_lock_nesting = INT_MIN;
 		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
+		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier();  /* ->rcu_read_unlock_special load before assign */
 		t->rcu_read_lock_nesting = 0;
 	}
 #ifdef CONFIG_PROVE_LOCKING
 	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
 
 		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
 	}
@@ -389,17 +389,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644);
 int rcu_jiffies_till_stall_check(void)
 {
-	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 
 	/*
 	 * Limit check must be consistent with the Kconfig limits
 	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
 	 */
 	if (till_stall_check < 3) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
 		till_stall_check = 3;
 	} else if (till_stall_check > 300) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
 		till_stall_check = 300;
 	}
 	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
@@ -550,12 +550,12 @@ static void check_holdout_task(struct task_struct *t,
 {
 	int cpu;
 
-	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
-	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
-	    !ACCESS_ONCE(t->on_rq) ||
+	if (!READ_ONCE(t->rcu_tasks_holdout) ||
+	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+	    !READ_ONCE(t->on_rq) ||
 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+		WRITE_ONCE(t->rcu_tasks_holdout, false);
 		list_del_init(&t->rcu_tasks_holdout_list);
 		put_task_struct(t);
 		return;
@@ -639,11 +639,11 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 */
 		rcu_read_lock();
 		for_each_process_thread(g, t) {
-			if (t != current && ACCESS_ONCE(t->on_rq) &&
+			if (t != current && READ_ONCE(t->on_rq) &&
 			    !is_idle_task(t)) {
 				get_task_struct(t);
-				t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
-				ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+				WRITE_ONCE(t->rcu_tasks_holdout, true);
 				list_add(&t->rcu_tasks_holdout_list,
 					 &rcu_tasks_holdouts);
 			}
@@ -672,7 +672,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			struct task_struct *t1;
 
 			schedule_timeout_interruptible(HZ);
-			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+			rtst = READ_ONCE(rcu_task_stall_timeout);
 			needreport = rtst > 0 &&
 				     time_after(jiffies, lastreport + rtst);
 			if (needreport)
@@ -728,7 +728,7 @@ static void rcu_spawn_tasks_kthread(void)
 	static struct task_struct *rcu_tasks_kthread_ptr;
 	struct task_struct *t;
 
-	if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
 		smp_mb(); /* Ensure caller sees full kthread. */
 		return;
 	}
@@ -740,7 +740,7 @@ static void rcu_spawn_tasks_kthread(void)
 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
 	BUG_ON(IS_ERR(t));
 	smp_mb(); /* Ensure others see full kthread. */
-	ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
 	mutex_unlock(&rcu_tasks_kthread_mutex);
 }
......
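rcu_spawn_tasks_kthread() above is a once-only publish: lockless callers test the pointer with READ_ONCE(), and the creator publishes it with WRITE_ONCE() only after a full barrier, so the kthread is fully formed by the time anyone sees it. A pthread-based sketch of the same shape, with __sync_synchronize() standing in for smp_mb():

```c
#include <pthread.h>
#include <stddef.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define smp_mb()		__sync_synchronize()	/* userspace stand-in */

static pthread_mutex_t spawn_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker;
static pthread_t *worker_ptr;	/* NULL until the thread is published */

static void *worker_fn(void *arg)
{
	return arg;
}

static void spawn_worker_once(void)
{
	if (READ_ONCE(worker_ptr)) {
		smp_mb();	/* ensure caller sees the full thread */
		return;
	}
	pthread_mutex_lock(&spawn_mutex);
	if (!worker_ptr) {
		pthread_create(&worker, NULL, worker_fn, NULL);
		smp_mb();	/* ensure others see the full thread... */
		WRITE_ONCE(worker_ptr, &worker);	/* ...before the pointer */
	}
	pthread_mutex_unlock(&spawn_mutex);
}
```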
@@ -409,7 +409,7 @@ static void (*torture_shutdown_hook)(void);
  */
 void torture_shutdown_absorb(const char *title)
 {
-	while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
 		pr_notice("torture thread %s parking due to system shutdown\n",
 			  title);
 		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
@@ -480,9 +480,9 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
 				   unsigned long unused2, void *unused3)
 {
 	mutex_lock(&fullstop_mutex);
-	if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
+	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
 		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
-		ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
 	} else {
 		pr_warn("Concurrent rmmod and shutdown illegal!\n");
 	}
@@ -523,13 +523,13 @@ static int stutter;
  */
 void stutter_wait(const char *title)
 {
-	while (ACCESS_ONCE(stutter_pause_test) ||
-	       (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
+	while (READ_ONCE(stutter_pause_test) ||
+	       (torture_runnable && !READ_ONCE(*torture_runnable))) {
 		if (stutter_pause_test)
-			if (ACCESS_ONCE(stutter_pause_test) == 1)
+			if (READ_ONCE(stutter_pause_test) == 1)
 				schedule_timeout_interruptible(1);
 			else
-				while (ACCESS_ONCE(stutter_pause_test))
+				while (READ_ONCE(stutter_pause_test))
 					cond_resched();
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
 		if (!torture_must_stop()) {
 			if (stutter > 1) {
 				schedule_timeout_interruptible(stutter - 1);
-				ACCESS_ONCE(stutter_pause_test) = 2;
+				WRITE_ONCE(stutter_pause_test, 2);
 			}
 			schedule_timeout_interruptible(1);
-			ACCESS_ONCE(stutter_pause_test) = 1;
+			WRITE_ONCE(stutter_pause_test, 1);
 		}
 		if (!torture_must_stop())
 			schedule_timeout_interruptible(stutter);
-		ACCESS_ONCE(stutter_pause_test) = 0;
+		WRITE_ONCE(stutter_pause_test, 0);
 		torture_shutdown_absorb("torture_stutter");
 	} while (!torture_must_stop());
 	torture_kthread_stopping("torture_stutter");
@@ -642,13 +642,13 @@ EXPORT_SYMBOL_GPL(torture_init_end);
 bool torture_cleanup_begin(void)
 {
 	mutex_lock(&fullstop_mutex);
-	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
 		pr_warn("Concurrent rmmod and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
 		schedule_timeout_uninterruptible(10);
 		return true;
 	}
-	ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
 	mutex_unlock(&fullstop_mutex);
 	torture_shutdown_cleanup();
 	torture_shuffle_cleanup();
@@ -681,7 +681,7 @@ EXPORT_SYMBOL_GPL(torture_must_stop);
  */
 bool torture_must_stop_irq(void)
 {
-	return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
+	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
 }
 EXPORT_SYMBOL_GPL(torture_must_stop_irq);
......
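The fullstop state machine above writes under fullstop_mutex with WRITE_ONCE() while hot paths such as torture_must_stop_irq() poll the flag locklessly with READ_ONCE(): the lock serializes transitions, and the _ONCE() accessors keep the lockless reads well-defined. A compact sketch of that protocol (names mirror the kernel's; the functions are simplified):

```c
#include <pthread.h>
#include <stdbool.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

enum { FULLSTOP_DONTSTOP, FULLSTOP_SHUTDOWN, FULLSTOP_RMMOD };

static pthread_mutex_t fullstop_mutex = PTHREAD_MUTEX_INITIALIZER;
static int fullstop = FULLSTOP_DONTSTOP;

static void request_shutdown(void)	/* cf. torture_shutdown_notify() */
{
	pthread_mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP)
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	pthread_mutex_unlock(&fullstop_mutex);
}

static bool must_stop(void)		/* cf. torture_must_stop_irq() */
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
```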