commit c7e88067 authored by Paul E. McKenney

srcu: Exact tracking of srcu_data structures containing callbacks

The current Tree SRCU implementation schedules a workqueue for every
srcu_data covered by a given leaf srcu_node structure having callbacks,
even if only one of those srcu_data structures actually contains
callbacks.  This is clearly inefficient for workloads that don't feature
callbacks everywhere all the time.  This commit therefore adds an array
of masks that are used by the leaf srcu_node structures to track exactly
which srcu_data structures contain callbacks.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Mike Galbraith <efault@gmx.de>
parent d160a727
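The bookkeeping this patch adds is easiest to see end to end: each srcu_data structure caches a one-bit mask for its position under its leaf (grpmask = 1 << (cpu - grplo)), queuing a callback ORs that bit into the leaf's srcu_data_have_cbs[] slot for the relevant grace period, and at grace-period end the mask is snapshotted, cleared, and used to schedule callback invocation only where bits are set. Below is a minimal user-space sketch of that technique, not kernel code; the toy_* names, TOY_FANOUT, and the printf() standing in for workqueue scheduling are all invented for illustration.

/*
 * Toy model of "exact tracking" (user-space sketch, not kernel code).
 * toy_leaf stands in for a leaf srcu_node, toy_cpu_data for srcu_data,
 * and printf() for scheduling the per-CPU workqueue.
 */
#include <stdio.h>

#define TOY_FANOUT 16				/* CPUs per leaf (assumed). */

struct toy_leaf {
	unsigned long data_have_cbs;		/* Like srcu_data_have_cbs[i]. */
	int grplo;				/* Lowest CPU under this leaf. */
};

struct toy_cpu_data {
	unsigned long grpmask;			/* Like sdp->grpmask. */
	int cpu;
};

/* Like init_srcu_struct_nodes(): compute each CPU's bit once at init. */
static void toy_init(struct toy_leaf *leaf, struct toy_cpu_data *sdp, int cpu)
{
	sdp->cpu = cpu;
	sdp->grpmask = 1UL << (cpu - leaf->grplo);
}

/* Like srcu_funnel_gp_start(): record that this CPU now has callbacks. */
static void toy_enqueue(struct toy_leaf *leaf, struct toy_cpu_data *sdp)
{
	leaf->data_have_cbs |= sdp->grpmask;
}

/* Like srcu_gp_end() + srcu_schedule_cbs_snp(): snapshot, clear, scan. */
static void toy_gp_end(struct toy_leaf *leaf)
{
	unsigned long mask = leaf->data_have_cbs;
	int cpu;

	leaf->data_have_cbs = 0;		/* Ready for the next GP. */
	for (cpu = leaf->grplo; cpu < leaf->grplo + TOY_FANOUT; cpu++) {
		if (!(mask & (1UL << (cpu - leaf->grplo))))
			continue;		/* No CBs here: skip this CPU. */
		printf("schedule CB invocation on CPU %d\n", cpu);
	}
}

int main(void)
{
	struct toy_leaf leaf = { .data_have_cbs = 0, .grplo = 8 };
	struct toy_cpu_data sdp[TOY_FANOUT];
	int i;

	for (i = 0; i < TOY_FANOUT; i++)
		toy_init(&leaf, &sdp[i], leaf.grplo + i);
	toy_enqueue(&leaf, &sdp[3]);		/* Only CPU 11 queues a CB. */
	toy_gp_end(&leaf);			/* Prints only "... CPU 11". */
	return 0;
}

With grplo = 8, CPU 11 owns bit 1UL << 3, so after the single enqueue above only CPU 11's "workqueue" fires; this is the behavior the commit gives the real leaf srcu_node structures in the diff that follows.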
@@ -47,6 +47,8 @@ struct srcu_data {
 	struct delayed_work work;		/* Context for CB invoking. */
 	struct rcu_head srcu_barrier_head;	/* For srcu_barrier() use. */
 	struct srcu_node *mynode;		/* Leaf srcu_node. */
+	unsigned long grpmask;			/* Mask for leaf srcu_node */
+						/*  ->srcu_data_have_cbs[]. */
 	int cpu;
 	struct srcu_struct *sp;
 };
@@ -59,6 +61,8 @@ struct srcu_node {
 	unsigned long srcu_have_cbs[4];		/* GP seq for children */
 						/*  having CBs, but only */
 						/*  is > ->srcu_gp_seq. */
+	unsigned long srcu_data_have_cbs[4];	/* Which srcu_data structs */
+						/*  have CBs for given GP? */
 	struct srcu_node *srcu_parent;		/* Next up in tree. */
 	int grplo;				/* Least CPU for node. */
 	int grphi;				/* Biggest CPU for node. */
...
@@ -66,8 +66,12 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	/* Each pass through this loop initializes one srcu_node structure. */
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_init(&snp->lock);
-		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++)
+		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
+			     ARRAY_SIZE(snp->srcu_data_have_cbs));
+		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
 			snp->srcu_have_cbs[i] = 0;
+			snp->srcu_data_have_cbs[i] = 0;
+		}
 		snp->grplo = -1;
 		snp->grphi = -1;
 		if (snp == &sp->node[0]) {
@@ -107,6 +111,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 		sdp->cpu = cpu;
 		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
 		sdp->sp = sp;
+		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 		if (is_static)
 			continue;
@@ -434,16 +439,21 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 /*
  * Schedule callback invocation for all srcu_data structures associated
- * with the specified srcu_node structure, if possible, on the corresponding
- * CPUs.
+ * with the specified srcu_node structure that have callbacks for the
+ * just-completed grace period, the one corresponding to idx.  If possible,
+ * schedule this invocation on the corresponding CPUs.
  */
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp)
+static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+				  unsigned long mask)
 {
 	int cpu;
 
-	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++)
+	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+		if (!(mask & (1 << (cpu - snp->grplo))))
+			continue;
 		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu),
 				      atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL);
+	}
 }
 
 /*
@@ -461,6 +471,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	unsigned long gpseq;
 	int idx;
 	int idxnext;
+	unsigned long mask;
 	struct srcu_node *snp;
 
 	/* Prevent more than one additional grace period. */
@@ -486,10 +497,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
 		cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
+		mask = snp->srcu_data_have_cbs[idx];
+		snp->srcu_data_have_cbs[idx] = 0;
 		spin_unlock_irq(&snp->lock);
 		if (cbs) {
 			smp_mb(); /* GP end before CB invocation. */
-			srcu_schedule_cbs_snp(sp, snp);
+			srcu_schedule_cbs_snp(sp, snp, mask);
 		}
 	}
@@ -536,6 +549,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp,
 		spin_lock_irqsave(&snp->lock, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 			snp_seq = snp->srcu_have_cbs[idx];
+			if (snp == sdp->mynode && snp_seq == s)
+				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 			spin_unlock_irqrestore(&snp->lock, flags);
 			if (snp == sdp->mynode && snp_seq != s) {
 				smp_mb(); /* CBs after GP! */
@@ -544,6 +559,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp,
 			return;
 		}
 		snp->srcu_have_cbs[idx] = s;
+		if (snp == sdp->mynode)
+			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		spin_unlock_irqrestore(&snp->lock, flags);
 	}
...