diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1116ebf5715bd1e3ccb0b369898c56f6bed716bc..310bcc79b07b5c5e0f0fd83392b8bad839b2f05a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2277,8 +2277,6 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
 	unsigned long flags;
 	unsigned long mask;
 	bool needwake = false;
-	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-			       rcu_segcblist_is_offloaded(&rdp->cblist);
 	struct rcu_node *rnp;
 
 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2302,9 +2300,13 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
 	if ((rnp->qsmask & mask) == 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
+		const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
+				       rcu_segcblist_is_offloaded(&rdp->cblist);
 		/*
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
+		 *
+		 * NOCB kthreads have their own way to deal with that.
 		 */
 		if (!offloaded)
 			needwake = rcu_accelerate_cbs(rnp, rdp);