From 5871968d531f39c23a8e6c69525bb705bca52e04 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 24 Feb 2015 11:05:36 -0800
Subject: [PATCH] rcu: Tighten up affinity and check for sysidle

If the RCU grace-period kthread invoking rcu_sysidle_check_cpu()
happens to be running on the tick_do_timer_cpu initially, then
rcu_bind_gp_kthread() won't bind it.  This kthread might then migrate
before invoking rcu_gp_fqs(), which will trigger the WARN_ON_ONCE() in
rcu_sysidle_check_cpu().

This commit therefore makes rcu_bind_gp_kthread() do the binding even
if the kthread is currently on the same CPU.  Because this incurs added
overhead, this commit also causes each RCU grace-period kthread to
invoke rcu_bind_gp_kthread() once at boot rather than at the beginning
of each grace period.  And as long as rcu_bind_gp_kthread() is being
modified, this commit eliminates its #ifdef.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c        |  2 +-
 kernel/rcu/tree_plugin.h | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 735bd7ee749a..a6972c20eaa5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1707,7 +1707,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	ACCESS_ONCE(rsp->gp_activity) = jiffies;
-	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
 	if (!ACCESS_ONCE(rsp->gp_flags)) {
@@ -1895,6 +1894,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	struct rcu_state *rsp = arg;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	rcu_bind_gp_kthread();
 	for (;;) {
 
 		/* Handle grace-period start. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a571e9a0f1d..b46c92824db1 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2763,7 +2763,8 @@ static void rcu_sysidle_exit(int irq)
 
 /*
  * Check to see if the current CPU is idle.  Note that usermode execution
- * does not count as idle.  The caller must have disabled interrupts.
+ * does not count as idle.  The caller must have disabled interrupts,
+ * and must be running on tick_do_timer_cpu.
  */
 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 				  unsigned long *maxj)
@@ -2784,8 +2785,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 	if (!*isidle || rdp->rsp != rcu_state_p ||
 	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
 		return;
-	if (rcu_gp_in_progress(rdp->rsp))
-		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+	/* Verify affinity of current kthread. */
+	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
 	/* Pick up current idle and NMI-nesting counter and check. */
 	cur = atomic_read(&rdtp->dynticks_idle);
@@ -3068,11 +3069,10 @@ static void rcu_bind_gp_kthread(void)
 		return;
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	cpu = tick_do_timer_cpu;
-	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
+	if (cpu >= 0 && cpu < nr_cpu_ids)
 		set_cpus_allowed_ptr(current, cpumask_of(cpu));
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-	if (!is_housekeeping_cpu(raw_smp_processor_id()))
-		housekeeping_affine(current);
+	housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
-- 
GitLab
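
Aside, not part of the patch above: the pattern this commit removes,
"check whether we are already on the target CPU before binding", is
racy because the scheduler can migrate the thread between that check
and any later code that relies on its CPU placement.  Below is a
minimal user-space sketch of the bind-once-unconditionally approach the
commit adopts instead.  Here timer_cpu, worker(), and the pthread calls
are illustrative stand-ins for tick_do_timer_cpu, the grace-period
kthread, and set_cpus_allowed_ptr(); this is not kernel code.  Build
with gcc -pthread on Linux.

/*
 * User-space analog of the bind-once pattern: the worker pins itself
 * to the designated "timekeeping" CPU exactly once at thread start,
 * and does so unconditionally, even if it already happens to be
 * running there.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static int timer_cpu;			/* stand-in for tick_do_timer_cpu */

static void *worker(void *arg)
{
	cpu_set_t mask;

	/* Bind unconditionally, once, at thread start. */
	CPU_ZERO(&mask);
	CPU_SET(timer_cpu, &mask);
	pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

	for (;;) {
		/* Affinity now guarantees we stay on timer_cpu. */
		if (sched_getcpu() != timer_cpu)
			fprintf(stderr, "unexpected migration!\n");
		/* ... periodic work, analogous to rcu_gp_fqs() ... */
		break;			/* single pass for the example */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}

Because the bind is now unconditional, it would be wasteful to repeat
it on every iteration; that is the same reasoning the commit applies in
moving the rcu_bind_gp_kthread() call from rcu_gp_init(), which runs at
the start of every grace period, to rcu_gp_kthread(), which runs once
at kthread startup.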