/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
 * all uses are in dead code.  Provide a definition to keep the compiler
 * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
 * This probably needs to be excluded from -rt builds.
 */
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU debugfs-based tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
		       RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
		pr_info("\tRCU torture testing starts during boot.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
	if (IS_ENABLED(CONFIG_RCU_BOOST))
		pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
}

#ifdef CONFIG_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;

static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
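
/*
 * Illustrative example (not from the original source): a blkd_state value
 * of RCU_GP_TASKS + RCU_EXP_BLKD (0x9) means that some earlier task is
 * already blocking the normal grace period and that the newly preempted
 * task blocks only the expedited grace period, so rcu_preempt_ctxt_queue()
 * below queues it at the tail of ->blkd_tasks.
 */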

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.  The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
				   unsigned long flags) __releases(rnp->lock)
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case                RCU_EXP_TASKS:
	case                RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                                              RCU_EXP_BLKD:
	case                                RCU_GP_BLKD:
	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS +                 RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
		rnp->gp_tasks = &t->rcu_node_entry;
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		rnp->exp_tasks = &t->rcu_node_entry;
	raw_spin_unlock(&rnp->lock);

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD &&
	    t->rcu_read_unlock_special.b.exp_need_qs) {
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rcu_report_exp_rdp(rdp->rsp, rdp, true);
	} else {
		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
	}
	local_irq_restore(flags);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * As with the other rcu_*_qs() functions, callers to this function
 * must disable preemption.
 */
static void rcu_preempt_qs(void)
{
	if (!__this_cpu_read(rcu_data_p->passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data_p->gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data_p->passed_quiesce, 1);
		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
		current->rcu_read_unlock_special.b.need_qs = false;
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		rcu_preempt_ctxt_queue(rnp, rdp, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs();
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	unsigned long flags;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special.b.need_qs) {
		rcu_preempt_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * Respond to a request for an expedited grace period, but only if
	 * we were not preempted, meaning that we were running on the same
	 * CPU throughout.  If we were preempted, the exp_need_qs flag
	 * would have been cleared at the time of the first preemption,
	 * and the quiescent state would be reported when we were dequeued.
	 */
	if (special.b.exp_need_qs) {
		WARN_ON_ONCE(special.b.blocked);
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rcu_report_exp_rdp(rcu_state_p, rdp, true);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (in_irq() || in_serving_softirq()) {
		lockdep_rcu_suspicious(__FILE__, __LINE__,
				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
			 t->rcu_read_unlock_special.s,
			 t->rcu_read_unlock_special.b.blocked,
			 t->rcu_read_unlock_special.b.exp_need_qs,
			 t->rcu_read_unlock_special.b.need_qs);
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to
		 * the CPU it first blocked on, so the first attempt to
		 * acquire the task's rcu_node's ->lock will succeed.
		 * Keep the loop and add a WARN_ON() out of sheer paranoia.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			smp_mb__after_unlock_lock();
			if (rnp == t->rcu_blocked_node)
				break;
			WARN_ON_ONCE(1);
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			if (&t->rcu_node_entry == rnp->boost_tasks)
				rnp->boost_tasks = np;
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rcu_state_p, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (rcu_preempt_has_tasks(rnp))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs();
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data_p->qs_pending) &&
	    !__this_cpu_read(rcu_data_p->passed_quiesce))
		t->rcu_read_unlock_special.b.need_qs = true;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, rcu_state_p, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
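
/*
 * Illustrative usage sketch, not part of this file: a typical caller embeds
 * an rcu_head in the protected structure and passes a callback that frees
 * it once a grace period has elapsed.  struct foo and foo_reclaim() are
 * hypothetical names.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	// Updater, with the update-side lock held:
 *	//	list_del_rcu(&p->list);
 *	//	call_rcu(&p->rcu, foo_reclaim);
 */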

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
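
/*
 * Illustrative usage sketch, not part of this file: the classic
 * remove/wait/reclaim updater pattern.  gp, my_lock, and struct foo are
 * hypothetical names.
 *
 *	struct foo *p;
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();	// wait for pre-existing readers to finish
 *	kfree(p);
 */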

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			if (raw_smp_processor_id() == cpu ||
			    cpu_is_offline(cpu) ||
			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		mask = 1;
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
			if (!(mask_ofl_ipi & mask))
				continue;
			ret = smp_call_function_single(cpu,
						       sync_rcu_exp_handler,
						       rsp, 0);
			if (!ret)
				mask_ofl_ipi &= ~mask;
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_node *rnp;
	struct rcu_node *rnp_unlock;
	struct rcu_state *rsp = rcu_state_p;
	unsigned long s;

	s = rcu_exp_gp_seq_snap(rsp);

	rnp_unlock = exp_funnel_lock(rsp, s);
	if (rnp_unlock == NULL)
		return;  /* Someone else did our work for us. */

	rcu_exp_gp_seq_start(rsp);

	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(rsp->expedited_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	rcu_exp_gp_seq_end(rsp);
	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
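
/*
 * Illustrative usage sketch, not part of this file: a module that posts
 * callbacks with call_rcu() typically calls rcu_barrier() in its exit
 * handler, after ensuring that no new callbacks will be posted, so that
 * all previously queued callbacks have run before the module is unloaded.
 * my_exit() is a hypothetical name.
 *
 *	static void __exit my_exit(void)
 *	{
 *		// ...prevent new call_rcu() invocations...
 *		rcu_barrier();	// wait for outstanding callbacks
 *	}
 */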

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(rcu_state_p, rcu_data_p);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special.b.blocked = true;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

static struct rcu_state *const rcu_state_p = &rcu_sched_state;
static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(void)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (!rcu_preempt_has_tasks(rnp))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (rcu_state_p != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rcu_for_each_leaf_node(rcu_state_p, rnp)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
1386 1387
 * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
1388
 */
1389
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1390
{
1391
	*nextevt = KTIME_MAX;
1392 1393
	return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
	       ? 0 : rcu_cpu_has_callbacks(NULL);
1394 1395 1396 1397 1398 1399
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three proprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU, but
 * only if it has been a while since the last time we did so.  Afterwards,
 * if there are any callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdtp->last_advance_all)
		return false;
	rdtp->last_advance_all = jiffies;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if ((rdp->completed != rnp->completed ||
		     unlikely(READ_ONCE(rdp->gpwrap))) &&
		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
			note_gp_changes(rsp, rdp);

		if (cpu_has_callbacks_ready_to_invoke(rdp))
			cbs_ready = true;
	}
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	unsigned long dj;

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		dj = round_up(rcu_idle_gp_delay + jiffies,
			       rcu_idle_gp_delay) - jiffies;
	} else {
		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}

/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	int tne;

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/* If this is a no-CBs CPU, no callbacks, just return. */
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdtp->all_lazy &&
	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
		rdtp->all_lazy = false;
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdtp->last_accelerate == jiffies)
		return;
	rdtp->last_accelerate = jiffies;
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (!*rdp->nxttail[RCU_DONE_TAIL])
			continue;
		rnp = rdp->mynode;
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		smp_mb__after_unlock_lock();
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1603 1604
		if (needwake)
			rcu_gp_kthread_wake(rsp);
1605
	}
1606
}
1607

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
	    rcu_is_nocb_cpu(smp_processor_id()))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
	smp_mb(); /* Ensure callback reuse happens after callback invocation. */

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched_rcu_qs();
	}

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;

	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
		ulong2long(nlpd),
		rdtp->all_lazy ? 'L' : '.',
		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	pr_cont("\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
	       fast_no_hz);
}
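
/*
 * For illustration only (all field values below are invented), a resulting
 * per-CPU stall-info line looks roughly like:
 *
 *	3: (20 ticks this GP) idle=4b7/140000000000000/0 softirq=1205/1211
 *	fqs=25 last_accelerate: 5a0c/5a11, nonlazy_posted: 2, .D
 *
 * where the trailing portion comes from print_cpu_stall_fast_no_hz() and
 * is non-empty only if CONFIG_RCU_FAST_NO_HZ is enabled.
 */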

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	pr_err("\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		raw_cpu_inc(rsp->rda->ticks_this_gp);
}

#ifdef CONFIG_RCU_NOCB_CPU

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For each CPU in the set, there is a
 * kthread created that pulls the callbacks from the corresponding CPU,
 * waits for a grace period to elapse, and invokes the callbacks.
 * The no-CBs CPUs do a wake_up() on their kthread when they insert
 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
 * has been specified, in which case each kthread actively polls its
 * CPU.  (Which isn't so great for energy efficiency, but which does
 * reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callback processing could also in theory be used as
 * an energy-efficiency measure because CPUs with no RCU callbacks
 * queued are more aggressive about entering dyntick-idle mode.
 */


/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	have_rcu_nocb_mask = true;
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = 1;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
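
/*
 * Usage sketch (boot-command-line examples, not additional code): booting
 * with "rcu_nocbs=1-7" offloads callback invocation for CPUs 1-7 to rcuo
 * kthreads, and adding "rcu_nocb_poll" makes those kthreads poll their
 * CPUs' queues instead of waiting to be awakened by the no-CBs CPUs.
 */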

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
}

/*
 * Set the root rcu_node structure's ->need_future_gp field
 * based on the sum of those of all rcu_node structures.  This does
 * double-count the root rcu_node structure's requests, but this
 * is necessary to handle the possibility of a rcu_nocb_kthread()
 * having awakened during the time that the rcu_node structures
 * were being updated for the end of the previous grace period.
 */
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}

#ifndef CONFIG_RCU_NOCB_CPU_ALL
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (have_rcu_nocb_mask)
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Kick the leader kthread for this NOCB group.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	if (!READ_ONCE(rdp_leader->nocb_kthread))
		return;
	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
		wake_up(&rdp_leader->nocb_wq);
	}
}

/*
 * Does the specified CPU need an RCU callback for the specified flavor
 * of rcu_barrier()?
 */
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted.  In the worst case, the first
	 * barrier in _rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, not a substitute for the caller
	 * getting the concurrency design right!).  There must also be
	 * a barrier between the following load and posting of a callback
	 * (if a callback is in fact needed).  This is associated with an
	 * atomic_inc() in the caller.
	 */
	ret = atomic_long_read(&rdp->nocb_q_count);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
		/* RCU callback enqueued before CPU first came online??? */
		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
		       cpu, rhp->func);
		WARN_ON_ONCE(1);
	}
#endif /* #ifdef CONFIG_PROVE_RCU */

	return !!ret;
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPUs queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	WRITE_ONCE(*old_rhpp, rhp);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ... */
	t = READ_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t) {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			wake_nocb_leader(rdp, false);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmptyIsDeferred"));
		}
		rdp->qlen_last_fqs_check = 0;
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		if (!irqs_disabled_flags(flags)) {
			wake_nocb_leader(rdp, true);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeOvf"));
		} else {
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeOvfIsDeferred"));
		}
		rdp->qlen_last_fqs_check = LONG_MAX / 2;
	} else {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
	}
	return;
}
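
/*
 * Summary of the wakeup policy above (descriptive only; the code is
 * authoritative): if polling is enabled or no rcuo kthread exists yet,
 * no wakeup is attempted.  Otherwise the leader is awakened when the
 * queue transitions from empty to non-empty, or force-awakened when the
 * queue length exceeds qlen_last_fqs_check + qhimark; in either case the
 * wakeup is deferred (RCU_NOGP_WAKE or RCU_NOGP_WAKE_FORCE) if interrupts
 * are disabled, and performed later by do_nocb_deferred_wakeup().
 */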

/*
 * This is a helper for __call_rcu(), which invokes this when the normal
 * callback queue is inoperable.  If this is not a no-CBs CPU, this
 * function returns failure back to __call_rcu(), which can complain
 * appropriately.
 *
 * Otherwise, this function queues the callback where the corresponding
 * "rcuo" kthread can find it.
 */
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{

	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;
	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
					 (unsigned long)rhp->func,
					 -atomic_long_read(&rdp->nocb_q_count_lazy),
					 -atomic_long_read(&rdp->nocb_q_count));
	else
		trace_rcu_callback(rdp->rsp->name, rhp,
				   -atomic_long_read(&rdp->nocb_q_count_lazy),
				   -atomic_long_read(&rdp->nocb_q_count));

	/*
	 * If called from an extended quiescent state with interrupts
	 * disabled, invoke the RCU core in order to allow the idle-entry
	 * deferred-wakeup check to function.
	 */
	if (irqs_disabled_flags(flags) &&
	    !rcu_is_watching() &&
	    cpu_online(smp_processor_id()))
		invoke_rcu_core();

	return true;
}

/*
 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
 * not a no-CBs CPU.
 */
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	long ql = rsp->qlen;
	long qll = rsp->qlen_lazy;

	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
	if (!rcu_is_nocb_cpu(smp_processor_id()))
		return false;
	rsp->qlen = 0;
	rsp->qlen_lazy = 0;

	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
	if (rsp->orphan_donelist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
					rsp->orphan_donetail, ql, qll, flags);
		ql = qll = 0;
		rsp->orphan_donelist = NULL;
		rsp->orphan_donetail = &rsp->orphan_donelist;
	}
	if (rsp->orphan_nxtlist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
					rsp->orphan_nxttail, ql, qll, flags);
		ql = qll = 0;
		rsp->orphan_nxtlist = NULL;
		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
	}
	return true;
}

/*
 * If necessary, kick off a new grace period, and either way wait
 * for a subsequent grace period to complete.
 */
static void rcu_nocb_wait_gp(struct rcu_data *rdp)
{
	unsigned long c;
	bool d;
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp = rdp->mynode;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	needwake = rcu_start_future_gp(rnp, rdp, &c);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	if (needwake)
		rcu_gp_kthread_wake(rdp->rsp);

	/*
	 * Wait for the grace period.  Do so interruptibly to avoid messing
	 * up the load average.
	 */
	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
	for (;;) {
		wait_event_interruptible(
			rnp->nocb_gp_wq[c & 0x1],
			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
		if (likely(d))
			break;
		WARN_ON(signal_pending(current));
		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
	}
	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
	smp_mb(); /* Ensure that CB invocation happens after GP end. */
}

/*
 * Leaders come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_leader_wait(struct rcu_data *my_rdp)
{
	bool firsttime = true;
	bool gotcbs;
	struct rcu_data *rdp;
	struct rcu_head **tail;

wait_again:

	/* Wait for callbacks to appear. */
	if (!rcu_nocb_poll) {
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
		wait_event_interruptible(my_rdp->nocb_wq,
				!READ_ONCE(my_rdp->nocb_leader_sleep));
		/* Memory barrier handled by smp_mb() calls below and repoll. */
	} else if (firsttime) {
		firsttime = false; /* Don't drown trace log with "Poll"! */
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
	}

	/*
	 * Each pass through the following loop checks a follower for CBs.
	 * We are our own first follower.  Any CBs found are moved to
	 * nocb_gp_head, where they await a grace period.
	 */
	gotcbs = false;
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
		if (!rdp->nocb_gp_head)
			continue;  /* No CBs here, try next follower. */

		/* Move callbacks to wait-for-GP list, which is empty. */
		WRITE_ONCE(rdp->nocb_head, NULL);
		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
		gotcbs = true;
	}

	/*
	 * If there were no callbacks, sleep a bit, rescan after a
	 * memory barrier, and go retry.
	 */
	if (unlikely(!gotcbs)) {
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);

		/* Rescan in case we were a victim of memory ordering. */
		my_rdp->nocb_leader_sleep = true;
		smp_mb();  /* Ensure _sleep true before scan. */
		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
			if (READ_ONCE(rdp->nocb_head)) {
				/* Found CB, so short-circuit next wait. */
				my_rdp->nocb_leader_sleep = false;
				break;
			}
		goto wait_again;
	}

	/* Wait for one grace period. */
	rcu_nocb_wait_gp(my_rdp);

	/*
	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
	 * We set it now, but recheck for new callbacks while
	 * traversing our follower list.
	 */
	my_rdp->nocb_leader_sleep = true;
	smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */

	/* Each pass through the following loop wakes a follower, if needed. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		if (READ_ONCE(rdp->nocb_head))
			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
		if (!rdp->nocb_gp_head)
			continue; /* No CBs, so no need to wake follower. */

		/* Append callbacks to follower's "done" list. */
		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
		*tail = rdp->nocb_gp_head;
		smp_mb__after_atomic(); /* Store *tail before wakeup. */
		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
			/*
			 * List was empty, wake up the follower.
			 * Memory barriers supplied by atomic_long_add().
			 */
			wake_up(&rdp->nocb_wq);
		}
	}

	/* If we (the leader) don't have CBs, go wait some more. */
	if (!my_rdp->nocb_follower_head)
		goto wait_again;
}

/*
 * Followers come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_follower_wait(struct rcu_data *rdp)
{
	bool firsttime = true;

	for (;;) {
		if (!rcu_nocb_poll) {
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "FollowerSleep");
			wait_event_interruptible(rdp->nocb_wq,
						 READ_ONCE(rdp->nocb_follower_head));
		} else if (firsttime) {
			/* Don't drown trace log with "Poll"! */
			firsttime = false;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
		}
		if (smp_load_acquire(&rdp->nocb_follower_head)) {
			/* ^^^ Ensure CB invocation follows _head test. */
			return;
		}
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);
	}
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2254 2255 2256
 * callbacks queued by the corresponding no-CBs CPU, however, there is
 * an optional leader-follower relationship so that the grace-period
 * kthreads don't have to do quite so many wakeups.
P
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks */
	for (;;) {
		/* Wait for callbacks. */
		if (rdp->nocb_leader == rdp)
			nocb_leader_wait(rdp);
		else
			nocb_follower_wait(rdp);

		/* Pull the ready-to-invoke callbacks onto local list. */
		list = READ_ONCE(rdp->nocb_follower_head);
		BUG_ON(!list);
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
		WRITE_ONCE(rdp->nocb_follower_head, NULL);
		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rdp->rsp->name,
				      atomic_long_read(&rdp->nocb_q_count_lazy),
				      atomic_long_read(&rdp->nocb_q_count), -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WaitQueue"));
				schedule_timeout_interruptible(1);
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WokeQueue"));
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rdp->rsp->name, list))
				cl++;
			c++;
			local_bh_enable();
			list = next;
		}
		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
		smp_mb__before_atomic();  /* _add after CB invocation. */
		atomic_long_add(-c, &rdp->nocb_q_count);
		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
		rdp->n_nocbs_invoked += c;
	}
	return 0;
}
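
/*
 * Rough life cycle of an offloaded callback, pieced together from the
 * functions above (illustrative summary, not additional mechanism):
 * __call_rcu_nocb_enqueue() appends it to the CPU's ->nocb_head list and
 * possibly wakes the leader; nocb_leader_wait() moves it to ->nocb_gp_head,
 * waits for a grace period via rcu_nocb_wait_gp(), and then hands it to the
 * owning CPU's ->nocb_follower_head list; finally rcu_nocb_kthread()
 * dequeues and invokes it, decrementing ->nocb_q_count.
 */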

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return READ_ONCE(rdp->nocb_defer_wakeup);
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	int ndw;

	if (!rcu_nocb_need_deferred_wakeup(rdp))
		return;
	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
}

void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = true;
	struct rcu_state *rsp;

#ifdef CONFIG_RCU_NOCB_CPU_NONE
	need_rcu_nocb_mask = false;
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
		have_rcu_nocb_mask = true;
	}
	if (!have_rcu_nocb_mask)
		return;

#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
		cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_rcu_flavor(rsp) {
		for_each_cpu(cpu, rcu_nocb_mask)
			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
		rcu_organize_nocb_kthreads(rsp);
	}
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_waitqueue_head(&rdp->nocb_wq);
	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
 * brought online out of order, this can require re-organizing the
 * leader-follower relationships.
 */
static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp;
	struct rcu_data *rdp_last;
	struct rcu_data *rdp_old_leader;
	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
		return;

	/* If we didn't spawn the leader first, reorganize! */
	rdp_old_leader = rdp_spawn->nocb_leader;
	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
		rdp_last = NULL;
		rdp = rdp_old_leader;
		do {
			rdp->nocb_leader = rdp_spawn;
			if (rdp_last && rdp != rdp_spawn)
				rdp_last->nocb_next_follower = rdp;
			if (rdp == rdp_spawn) {
				rdp = rdp->nocb_next_follower;
			} else {
				rdp_last = rdp;
				rdp = rdp->nocb_next_follower;
				rdp_last->nocb_next_follower = NULL;
			}
		} while (rdp);
		rdp_spawn->nocb_next_follower = rdp_old_leader;
	}

	/* Spawn the kthread for this CPU and RCU flavor. */
	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
			"rcuo%c/%d", rsp->abbr, cpu);
	BUG_ON(IS_ERR(t));
	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	struct rcu_state *rsp;

	if (rcu_scheduler_fully_active)
		for_each_rcu_flavor(rsp)
			rcu_spawn_one_nocb_kthread(rsp, cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}

/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);

/*
 * Initialize leader-follower relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
{
	int cpu;
	int ls = rcu_nocb_leader_stride;
	int nl = 0;  /* Next leader. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!have_rcu_nocb_mask)
		return;
	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);
		rcu_nocb_leader_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure and
	 * spawns one rcu_nocb_kthread().
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rdp->cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_leader = rdp;
			rdp_leader = rdp;
		} else {
			/* Another follower, link to previous leader. */
			rdp->nocb_leader = rdp_leader;
			rdp_prev->nocb_next_follower = rdp;
		}
		rdp_prev = rdp;
	}
}
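
/*
 * Worked example (illustrative only): with "rcu_nocbs=0-15" and the default
 * rcu_nocb_leader_stride of -1 on a 16-CPU system, ls becomes
 * int_sqrt(16) = 4, so the loop above makes CPUs 0, 4, 8, and 12 leaders,
 * each leading itself plus the following three no-CBs CPUs.
 */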

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;

	/* If there are early-boot callbacks, move them to nocb lists. */
	if (rdp->nxtlist) {
		rdp->nocb_head = rdp->nxtlist;
		rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
		atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
		atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
		rdp->nxtlist = NULL;
		rdp->qlen = 0;
		rdp->qlen_lazy = 0;
	}
	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
	return true;
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	WARN_ON_ONCE(1); /* Should be dead code. */
	return false;
}

static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	return false;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * An adaptive-ticks CPU can potentially execute in kernel mode for an
 * arbitrarily long period of time with the scheduling-clock tick turned
 * off.  RCU will be paying attention to this CPU because it is in the
 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
 * machine because the scheduling-clock tick has been disabled.  Therefore,
 * if an adaptive-ticks CPU is failing to respond to the current grace
 * period and has not been idle from an RCU perspective, kick it.
 */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(cpu))
		smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}


#ifdef CONFIG_NO_HZ_FULL_SYSIDLE

static int full_sysidle_state;		/* Current system-idle state. */
#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */

/*
 * Invoked to note exit from irq or task transition to idle.  Note that
 * usermode execution does -not- count as idle here!  After all, we want
 * to detect full-system idle states, not RCU quiescent states and grace
 * periods.  The caller must have disabled interrupts.
 */
static void rcu_sysidle_enter(int irq)
{
	unsigned long j;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	/* Adjust nesting, check for fully idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting--;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
		if (rdtp->dynticks_idle_nesting != 0)
			return;  /* Still not fully idle. */
	} else {
		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
		    DYNTICK_TASK_NEST_VALUE) {
			rdtp->dynticks_idle_nesting = 0;
		} else {
			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
			return;  /* Still not fully idle. */
		}
	}

	/* Record start of fully idle period. */
	j = jiffies;
	WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
	smp_mb__before_atomic();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic();
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
}

/*
 * Unconditionally force exit from full system-idle state.  This is
 * invoked when a normal CPU exits idle, but must be called separately
 * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
 * is that the timekeeping CPU is permitted to take scheduling-clock
 * interrupts while the system is in system-idle state, and of course
 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
 * interrupt from any other type of interrupt.
 */
void rcu_sysidle_force_exit(void)
{
	int oldstate = READ_ONCE(full_sysidle_state);
	int newoldstate;

	/*
	 * Each pass through the following loop attempts to exit full
	 * system-idle state.  If contention proves to be a problem,
	 * a trylock-based contention tree could be used here.
	 */
	while (oldstate > RCU_SYSIDLE_SHORT) {
		newoldstate = cmpxchg(&full_sysidle_state,
				      oldstate, RCU_SYSIDLE_NOT);
		if (oldstate == newoldstate &&
		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
			rcu_kick_nohz_cpu(tick_do_timer_cpu);
			return; /* We cleared it, done! */
		}
		oldstate = newoldstate;
	}
	smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
}

/*
 * Invoked to note entry to irq or task transition from idle.  Note that
 * usermode execution does -not- count as idle here!  The caller must
 * have disabled interrupts.
 */
static void rcu_sysidle_exit(int irq)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	/* Adjust nesting, check for already non-idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting++;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
		if (rdtp->dynticks_idle_nesting != 1)
			return; /* Already non-idle. */
	} else {
		/*
		 * Allow for irq misnesting.  Yes, it really is possible
		 * to enter an irq handler then never leave it, and maybe
		 * also vice versa.  Handle both possibilities.
		 */
		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
			return; /* Already non-idle. */
		} else {
			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
		}
	}

	/* Record end of idle period. */
	smp_mb__before_atomic();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic();
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));

	/*
	 * If we are the timekeeping CPU, we are permitted to be non-idle
	 * during a system-idle state.  This must be the case, because
	 * the timekeeping CPU has to take scheduling-clock interrupts
	 * during the time that the system is transitioning to full
	 * system-idle state.  This means that the timekeeping CPU must
	 * invoke rcu_sysidle_force_exit() directly if it does anything
	 * more than take a scheduling-clock interrupt.
	 */
	if (smp_processor_id() == tick_do_timer_cpu)
		return;

	/* Update system-idle state: We are clearly no longer fully idle! */
	rcu_sysidle_force_exit();
}

/*
 * Check to see if the current CPU is idle.  Note that usermode execution
 * does not count as idle.  The caller must have disabled interrupts,
 * and must be running on tick_do_timer_cpu.
 */
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
	int cur;
	unsigned long j;
	struct rcu_dynticks *rdtp = rdp->dynticks;

	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
	if (!tick_nohz_full_enabled())
		return;

	/*
	 * If some other CPU has already reported non-idle, if this is
	 * not the flavor of RCU that tracks sysidle state, or if this
	 * is an offline or the timekeeping CPU, nothing to do.
	 */
	if (!*isidle || rdp->rsp != rcu_state_p ||
	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
		return;
	/* Verify affinity of current kthread. */
	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);

	/* Pick up current idle and NMI-nesting counter and check. */
	cur = atomic_read(&rdtp->dynticks_idle);
	if (cur & 0x1) {
		*isidle = false; /* We are not idle! */
		return;
	}
	smp_mb(); /* Read counters before timestamps. */

	/* Pick up timestamps. */
	j = READ_ONCE(rdtp->dynticks_idle_jiffies);
	/* If this CPU entered idle more recently, update maxj timestamp. */
	if (ULONG_CMP_LT(*maxj, j))
		*maxj = j;
}

/*
 * Is this the flavor of RCU that is handling full-system idle?
 */
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return rsp == rcu_state_p;
}

/*
 * Return a delay in jiffies based on the number of CPUs, rcu_node
 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
 * systems more time to transition to full-idle state in order to
 * avoid the cache thrashing that would otherwise occur on the state variable.
 * Really small systems (less than a couple of tens of CPUs) should
 * instead use a single global atomically incremented counter, and later
 * versions of this will automatically reconfigure themselves accordingly.
 */
static unsigned long rcu_sysidle_delay(void)
{
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return 0;
	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
}
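
/*
 * For example (illustrative arithmetic only): with nr_cpu_ids = 256,
 * HZ = 1000, and rcu_fanout_leaf = 16, the delay evaluates to
 * DIV_ROUND_UP(256 * 1000, 16 * 1000) = 16 jiffies, while a system with
 * nr_cpu_ids at or below CONFIG_NO_HZ_FULL_SYSIDLE_SMALL gets no delay.
 */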

/*
 * Advance the full-system-idle state.  This is invoked when all of
 * the non-timekeeping CPUs are idle.
 */
static void rcu_sysidle(unsigned long j)
{
	/* Check the current state. */
	switch (READ_ONCE(full_sysidle_state)) {
	case RCU_SYSIDLE_NOT:

		/* First time all are idle, so note a short idle period. */
		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
		break;

	case RCU_SYSIDLE_SHORT:

		/*
		 * Idle for a bit, time to advance to next state?
		 * cmpxchg failure means race with non-idle, let them win.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
		break;

	case RCU_SYSIDLE_LONG:

		/*
		 * Do an additional check pass before advancing to full.
		 * cmpxchg failure means race with non-idle, let them win.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
		break;

	default:
		break;
	}
}
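
/*
 * State progression, for reference (derived from the code above):
 * RCU_SYSIDLE_NOT -> RCU_SYSIDLE_SHORT -> RCU_SYSIDLE_LONG ->
 * RCU_SYSIDLE_FULL -> RCU_SYSIDLE_FULL_NOTED.  rcu_sysidle() advances one
 * step at a time once the rcu_sysidle_delay() interval has elapsed, the
 * final step is taken by rcu_sys_is_idle(), and any non-idle CPU resets
 * the state via rcu_sysidle_cancel().
 */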

/*
 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
 * back to the beginning.
 */
static void rcu_sysidle_cancel(void)
{
	smp_mb();
	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
}

/*
 * Update the sysidle state based on the results of a force-quiescent-state
 * scan of the CPUs' dyntick-idle state.
 */
static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
			       unsigned long maxj, bool gpkt)
{
	if (rsp != rcu_state_p)
		return;  /* Wrong flavor, ignore. */
	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return;  /* Running state machine from timekeeping CPU. */
	if (isidle)
		rcu_sysidle(maxj);    /* More idle! */
	else
		rcu_sysidle_cancel(); /* Idle is over. */
}

/*
 * Wrapper for rcu_sysidle_report() when called from the grace-period
 * kthread's context.
 */
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	rcu_sysidle_report(rsp, isidle, maxj, true);
}

/* Callback and function for forcing an RCU grace period. */
struct rcu_sysidle_head {
	struct rcu_head rh;
	int inuse;
};

static void rcu_sysidle_cb(struct rcu_head *rhp)
{
	struct rcu_sysidle_head *rshp;

	/*
	 * The following memory barrier is needed to replace the
	 * memory barriers that would normally be in the memory
	 * allocator.
	 */
	smp_mb();  /* grace period precedes setting inuse. */

	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
	WRITE_ONCE(rshp->inuse, 0);
}

/*
 * Check to see if the system is fully idle, other than the timekeeping CPU.
 * The caller must have disabled interrupts.  This is not intended to be
 * called unless tick_nohz_full_enabled().
 */
bool rcu_sys_is_idle(void)
{
	static struct rcu_sysidle_head rsh;
	int rss = READ_ONCE(full_sysidle_state);

	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
		return false;

	/* Handle small-system case by doing a full scan of CPUs. */
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
		int oldrss = rss - 1;

		/*
		 * One pass to advance to each state up to _FULL.
		 * Give up if any pass fails to advance the state.
		 */
		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
			int cpu;
			bool isidle = true;
			unsigned long maxj = jiffies - ULONG_MAX / 4;
			struct rcu_data *rdp;

			/* Scan all the CPUs looking for nonidle CPUs. */
			for_each_possible_cpu(cpu) {
				rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
				if (!isidle)
					break;
			}
			rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
			oldrss = rss;
			rss = READ_ONCE(full_sysidle_state);
		}
	}

	/* If this is the first observation of an idle period, record it. */
	if (rss == RCU_SYSIDLE_FULL) {
		rss = cmpxchg(&full_sysidle_state,
			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
		return rss == RCU_SYSIDLE_FULL;
	}

	smp_mb(); /* ensure rss load happens before later caller actions. */

	/* If already fully idle, tell the caller (in case of races). */
	if (rss == RCU_SYSIDLE_FULL_NOTED)
		return true;

	/*
	 * If we aren't there yet, and a grace period is not in flight,
	 * initiate a grace period.  Either way, tell the caller that
	 * we are not there yet.  We use an xchg() rather than an assignment
	 * to make up for the memory barriers that would otherwise be
	 * provided by the memory allocator.
	 */
	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
	    !rcu_gp_in_progress(rcu_state_p) &&
	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
		call_rcu(&rsh.rh, rcu_sysidle_cb);
	return false;
}

/*
 * Initialize dynticks sysidle state for CPUs coming online.
 */
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
}

#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static void rcu_sysidle_enter(int irq)
{
}

static void rcu_sysidle_exit(int irq)
{
}

static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
}

static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return false;
}

static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
}

static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
3034
 * CONFIG_RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress(rsp) ||
	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}
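
/*
 * Note that the "+ HZ" above means that, roughly speaking, a NO_HZ_FULL
 * CPU is left alone unless the current grace period has been pending for
 * more than about one second.
 */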

/*
 * Bind the grace-period kthread for the sysidle flavor of RCU to the
 * timekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	int __maybe_unused cpu;

	if (!tick_nohz_full_enabled())
		return;
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	cpu = tick_do_timer_cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids)
		set_cpus_allowed_ptr(current, cpumask_of(cpu));
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
	housekeeping_affine(current);
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
}

/* Record the current task on dyntick-idle entry. */
static void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}