Commit 8bc6782f authored by Ingo Molnar

Merge commit 'fixes.2015.02.23a' into core/rcu

 Conflicts:
	kernel/rcu/tree.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -20,12 +20,14 @@
 # define __pmem	__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
 # define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private	__attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
 # define __user
 # define __kernel
 # define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __percpu
 # define __rcu
 # define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */

 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b
......
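For orientation, here is a minimal sketch (hypothetical struct and helper names, not part of this commit) of how the new `__private`/`ACCESS_PRIVATE()` pair from include/linux/compiler.h is meant to be used; the warning only appears in a sparse run such as `make C=1`:

```c
/* Hypothetical example illustrating __private/ACCESS_PRIVATE(). */
struct foo {
	int __private bar;	/* direct dereference is flagged by sparse */
};

static inline int foo_get_bar(struct foo *f)
{
	return ACCESS_PRIVATE(f, bar);	/* sanctioned access via __force cast */
}

static inline int foo_peek_bar(struct foo *f)
{
	return f->bar;	/* sparse: "dereference of noderef expression" */
}
```

Outside of sparse runs (`__CHECKER__` undefined), `ACCESS_PRIVATE()` degenerates to a plain member access, so there is no runtime cost.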
@@ -137,7 +137,7 @@ struct irq_domain;
  * @msi_desc:		MSI descriptor
  */
 struct irq_common_data {
-	unsigned int		state_use_accessors;
+	unsigned int		__private state_use_accessors;
 #ifdef CONFIG_NUMA
 	unsigned int		node;
 #endif
@@ -208,7 +208,7 @@ enum {
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
 };

-#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
@@ -299,6 +299,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }

+#undef __irqd_to_state
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
......
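The `#define __irqd_to_state()` ... `#undef __irqd_to_state` bracketing above is a scoping trick: the accessor exists only for the inline helpers between the two directives, so nothing else that includes the header can reach `state_use_accessors`, even through the macro. A condensed sketch of the pattern (hypothetical names):

```c
#define __get_state(d)	ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline bool irqd_test_mask(struct irq_data *d, unsigned int mask)
{
	return __get_state(d) & mask;	/* legitimate: inside the bracket */
}

#undef __get_state	/* code below this line cannot reach the field */
```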
@@ -360,8 +360,6 @@ void rcu_user_exit(void);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
-					 struct task_struct *next) { }
 #endif /* CONFIG_NO_HZ_FULL */

 #ifdef CONFIG_RCU_NOCB_CPU
......
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
 }

 /*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique.  These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function.  If these rules are too
+ * restrictive, declare the srcu_struct manually.  For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)	\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
......
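The expanded comment distinguishes the build-time and run-time initialization models. A brief sketch of both, using the standard SRCU read-side API:

```c
/* Build-time variant: file scope only, name must be globally unique. */
DEFINE_STATIC_SRCU(my_static_srcu);

/* Run-time variant: initialize (and eventually clean up) by hand. */
static struct srcu_struct my_srcu;

static int example(void)
{
	int idx;

	init_srcu_struct(&my_srcu);	/* required before first use */

	idx = srcu_read_lock(&my_srcu);
	/* ... read-side critical section ... */
	srcu_read_unlock(&my_srcu, idx);

	synchronize_srcu(&my_srcu);	/* wait for pre-existing readers */
	cleanup_srcu_struct(&my_srcu);	/* only for the run-time variant */
	return 0;
}
```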
@@ -160,6 +160,8 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
 	__irq_put_desc_unlock(desc, flags, false);
 }

+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -188,6 +190,8 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
 	return __irqd_to_state(d) & mask;
 }

+#undef __irqd_to_state
+
 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
......
@@ -932,12 +932,14 @@ rcu_torture_writer(void *arg)
 	int nsynctypes = 0;

 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-	pr_alert("%s" TORTURE_FLAG
-		 " Grace periods expedited from boot/sysfs for %s,\n",
-		 torture_type, cur_ops->name);
-	pr_alert("%s" TORTURE_FLAG
-		 " Testing of dynamic grace-period expediting diabled.\n",
-		 torture_type);
+	if (!can_expedite) {
+		pr_alert("%s" TORTURE_FLAG
+			 " Grace periods expedited from boot/sysfs for %s,\n",
+			 torture_type, cur_ops->name);
+		pr_alert("%s" TORTURE_FLAG
+			 " Disabled dynamic grace-period expediting.\n",
+			 torture_type);
+	}

 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
......
@@ -23,7 +23,7 @@
  */

 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -122,18 +122,7 @@ static int __init rcutiny_trace_init(void)
 	debugfs_remove_recursive(rcudir);
 	return 1;
 }
-
-static void __exit rcutiny_trace_cleanup(void)
-{
-	debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutiny_trace_init);
-module_exit(rcutiny_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutiny_trace_init);

 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 {
......
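Because TINY_RCU's tracing support can only be built into the kernel, the `module_init()`/`module_exit()` pair and the MODULE_* boilerplate are replaced by a single `device_initcall()`; the cleanup path was unreachable anyway. The general shape of the pattern for built-in-only code, roughly (hypothetical name):

```c
#include <linux/init.h>

static int __init my_builtin_init(void)
{
	/* one-time setup; no __exit counterpart for built-in-only code */
	return 0;
}
device_initcall(my_builtin_init);
```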
@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);

 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	if ((rdp->dynticks_snap & 0x1) == 0) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-		return 1;
-	} else {
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
 			WRITE_ONCE(rdp->gpwrap, true);
-		return 0;
+		return 1;
 	}
+	return 0;
 }

 /*
@@ -1173,15 +1171,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
 			WRITE_ONCE(*rcrmp,
 				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-			/* Time to beat on that CPU again! */
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 		}
+		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 	}
+
+	/* And if it has been a really long time, kick the CPU as well. */
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
 	return 0;
 }
@@ -1246,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 			if (rnp->qsmask & (1UL << cpu))
 				dump_cpu_task(rnp->grplo + cpu);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
@@ -1266,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
 		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

 	/*
 	 * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 				ndetected++;
 			}
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}

 	print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 unlock_out:
 	if (rnp != rnp_root)
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 out:
 	if (c_out != NULL)
 		*c_out = c;
@@ -1814,7 +1813,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		return;
 	}
 	needwake = __note_gp_changes(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep.  */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
 		 */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
@@ -1858,7 +1857,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	/* Record GP times before starting GP, hence smp_store_release(). */
 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);

 	/*
 	 * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
-			raw_spin_unlock_irq(&rnp->lock);
+			raw_spin_unlock_irq_rcu_node(rnp);
 			continue;
 		}
@@ -1906,7 +1905,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 			    rcu_cleanup_dead_rnp(rnp);
 		}

-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}

 	/*
@@ -1937,7 +1936,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -1995,7 +1994,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
@@ -2025,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 * safe for us to drop the lock in order to mark the grace
 	 * period as completed in all of the rcu_node structures.
 	 */
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);

 	/*
 	 * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		sq = rcu_nocb_gp_get(rnp);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("newreq"));
 	}
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 }

 /*
@@ -2236,18 +2235,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }

 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
@@ -2277,7 +2278,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			 * Our bit has already been cleared, or the
 			 * relevant grace period is already over, so done.
 			 */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

 			/* Other bits still set at this level, so done. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			break;
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;  /* Still need more quiescent states! */
 	}
@@ -2348,19 +2349,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }

 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	mask = rdp->grpmask;
 	if ((rnp->qsmask & mask) == 0) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
-		rdp->core_needs_qs = 0;
+		rdp->core_needs_qs = false;

 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2601,10 +2597,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp);
+			/* irqs remain disabled. */
 			return;
 		}
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 }
@@ -2627,7 +2624,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -2861,7 +2858,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 	}
 }
@@ -2897,11 +2894,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
-		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;  /* Someone beat us to it. */
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
@@ -2927,7 +2924,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (cpu_needs_another_gp(rsp, rdp)) {
 		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3018,7 +3015,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 			raw_spin_lock_rcu_node(rnp_root);
 			needwake = rcu_start_gp(rsp);
-			raw_spin_unlock(&rnp_root->lock);
+			raw_spin_unlock_rcu_node(rnp_root);
 			if (needwake)
 				rcu_gp_kthread_wake(rsp);
 		} else {
@@ -3438,14 +3435,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			continue;  /* No new CPUs, nothing to do. */
 		}

 		/* Update this node's mask, track old value for propagation. */
 		oldmask = rnp->expmaskinit;
 		rnp->expmaskinit = rnp->expmaskinitnext;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

 		/* If was already nonzero, nothing to propagate. */
 		if (oldmask)
@@ -3460,7 +3457,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
-			raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 			if (done)
 				break;
 			mask = rnp_up->grpmask;
@@ -3483,7 +3480,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
@@ -3524,11 +3521,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			break;
 		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
 				swake_up(&rsp->expedited_wq);
@@ -3536,7 +3533,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3568,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	rnp->expmask &= ~mask;
@@ -3732,7 +3729,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		 */
 		if (rcu_preempt_has_tasks(rnp))
 			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

 		/* IPI the remaining CPUs for expedited quiescent state. */
 		mask = 1;
@@ -3749,7 +3746,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
@@ -3758,7 +3755,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4162,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
-		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
 	}
 }
@@ -4189,7 +4186,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rsp = rsp;
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -4217,7 +4214,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

 	/*
 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4238,7 +4235,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 static void rcu_prepare_cpu(int cpu)
@@ -4360,7 +4357,7 @@ static int __init rcu_spawn_gp_kthread(void)
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
@@ -4451,8 +4448,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < levelcnt[i]; j++, rnp++) {
-			raw_spin_lock_init(&rnp->lock);
-			lockdep_set_class_and_name(&rnp->lock,
+			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
......
@@ -150,8 +150,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
@@ -682,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */

 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -691,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }

+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }

-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
-do {									\
-	typecheck(unsigned long, flags);				\
-	raw_spin_lock_irqsave(&(rnp)->lock, flags);			\
-	smp_mb__after_unlock_lock();					\
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
 } while (0)

 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

 	if (locked)
 		smp_mb__after_unlock_lock();
......
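With `rcu_node.lock` now `__private`, every acquisition and release in tree.c and tree_plugin.h must go through these wrappers, which is what the bulk of the mechanical changes in this merge do. A sketch of the resulting calling convention (the function itself is hypothetical):

```c
static void example_clear_qsmask(struct rcu_node *rnp, unsigned long mask)
{
	unsigned long flags;

	/*
	 * The acquire side adds smp_mb__after_unlock_lock() so that
	 * unlock+lock pairs on different rcu_node structures still give
	 * full ordering; the release side is a plain unlock.
	 */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->qsmask &= ~mask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
```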
@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 						 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 		} else {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}

 		/* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 		sched_show_task(t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 /*
@@ -807,7 +807,6 @@ void exit_rcu(void)
 #else /* #ifdef CONFIG_PREEMPT_RCU */

 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;

 /*
  * Tell them what RCU they are running.
@@ -991,7 +990,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 * might exit their RCU read-side critical sections on their own.
 	 */
 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return 0;
 	}
@@ -1028,7 +1027,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1088,7 +1087,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1098,13 +1097,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	    ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		t = rnp->boost_kthread_task;
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
@@ -1172,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1308,7 +1307,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }

 static void invoke_rcu_callbacks_kthread(void)
@@ -1559,7 +1558,7 @@ static void rcu_prepare_for_idle(void)
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	}
@@ -2064,7 +2063,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	needwake = rcu_start_future_gp(rnp, rdp, &c);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
......
@@ -269,7 +269,8 @@ our $Sparse = qr{
 			__init_refok|
 			__kprobes|
 			__ref|
-			__rcu
+			__rcu|
+			__private
 		}x;
 our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
 our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
......