diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a1a72a1ecb026baf16d335140b3c29718a608d27..21d740f0b8dc8a99698611276e3ee898aebdf57c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -161,6 +161,8 @@ struct rcu_data {
 					/* ticks this CPU has handled */
 					/* during and after the last grace */
 					/* period it is aware of. */
+	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
+	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
 
 	/* 2) batch handling */
 	struct rcu_segcblist cblist;	/* Segmented callback list, with */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e1005f5e8094e5f362ae9e27b614fbea9e288a6c..58c7853f19e73e7ac713b1ccf8e7630e5121e617 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -587,6 +587,17 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
 	t->rcu_read_lock_nesting += RCU_NEST_BIAS;
 }
 
+/*
+ * Minimal handler to give the scheduler a chance to re-evaluate.
+ */
+static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
+{
+	struct rcu_data *rdp;
+
+	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
+	rdp->defer_qs_iw_pending = false;
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
@@ -630,6 +641,15 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		// Also if no expediting or NO_HZ_FULL, slow is OK.
 		set_tsk_need_resched(current);
 		set_preempt_need_resched();
+		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+		    !rdp->defer_qs_iw_pending && exp) {
+			// Get scheduler to re-evaluate and call hooks.
+			// If !IRQ_WORK, FQS scan will eventually IPI.
+			init_irq_work(&rdp->defer_qs_iw,
+				      rcu_preempt_deferred_qs_handler);
+			rdp->defer_qs_iw_pending = true;
+			irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
+		}
 	}
 	t->rcu_read_unlock_special.b.deferred_qs = true;
 	local_irq_restore(flags);
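
For reference, and not part of the patch: a minimal sketch of the generic irq_work pattern the change above relies on, with a hypothetical per-CPU "struct foo_data" standing in for rcu_data. Only init_irq_work(), irq_work_queue_on(), IS_ENABLED(), and container_of() are real kernel APIs from <linux/irq_work.h> and friends; the foo_* names and fields are illustrative assumptions.

#include <linux/irq_work.h>
#include <linux/kernel.h>

struct foo_data {
	struct irq_work iw;	/* Handler runs later in hard-irq context. */
	bool iw_pending;	/* Has the irq_work already been queued? */
	int cpu;		/* CPU whose scheduler state needs attention. */
};

/* Runs in hard-irq context on foo->cpu; the subsequent irq exit path
 * re-runs the scheduler hooks, which is the whole point. */
static void foo_iw_handler(struct irq_work *iwp)
{
	struct foo_data *foo = container_of(iwp, struct foo_data, iw);

	foo->iw_pending = false;
}

/* Called from a context that must not call into the scheduler directly. */
static void foo_poke_scheduler(struct foo_data *foo)
{
	if (!IS_ENABLED(CONFIG_IRQ_WORK) || foo->iw_pending)
		return;
	init_irq_work(&foo->iw, foo_iw_handler);
	foo->iw_pending = true;
	irq_work_queue_on(&foo->iw, foo->cpu);	/* Self-IPI the target CPU. */
}

As in the patch, the pending flag keeps the irq_work from being queued twice, and the handler itself does almost nothing: the useful work is the interrupt it provokes on the target CPU, whose exit path gives the scheduler a clean context in which to act.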