srcutree.c
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&snp->lock);
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&sdp->lock);
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
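
/*
 * Illustrative usage sketch, not part of this file's logic; the names
 * my_static_srcu, my_srcu, and my_init are hypothetical.  A statically
 * allocated domain is defined with DEFINE_SRCU() and needs no explicit
 * initialization, while any other srcu_struct must be passed to
 * init_srcu_struct() before first use:
 *
 *	DEFINE_SRCU(my_static_srcu);
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 */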

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore(&sp->gp_lock, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * Possible bug: There is no guarantee that there haven't been
	 * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
	 * counted, meaning that this could return true even if there are
	 * still active readers.  Since there are no memory barriers around
	 * srcu_flip(), the CPU is not required to increment ->srcu_idx
	 * before running srcu_readers_unlock_idx(), which means that there
	 * could be an arbitrarily large number of critical sections that
	 * execute after srcu_readers_unlock_idx() but use the old value
	 * of ->srcu_idx.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	__this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
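
/*
 * Illustrative reader-side sketch, not part of this file's logic; the
 * names my_srcu, gp, do_something_with(), and struct foo are hypothetical.
 * Readers normally use the srcu_read_lock()/srcu_read_unlock() wrappers
 * around the functions above, plus srcu_dereference() to fetch the
 * protected pointer:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 */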

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
			 "Invoked srcu_gp_start() without ->gp_lock!");
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq(&sp->gp_lock);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq(&sp->gp_lock);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq(&snp->lock);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq(&snp->lock);
		if (cbs) {
			smp_mb(); /* GP end before CB invocation. */
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
		}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq(&sp->gp_lock);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		spin_unlock_irq(&sp->gp_lock);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore(&snp->lock, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore(&snp->lock, flags);
	}
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore(&snp->lock, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				smp_mb(); /* CBs after GP! */
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore(&snp->lock, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long curseq;

	/* If the local srcu_data structure has callbacks, not idle.  */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock(&sdp->lock);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore(&sdp->lock, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
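
/*
 * Illustrative updater sketch, not part of this file's logic; the names
 * my_srcu, my_lock, gp, foo_reclaim(), and struct foo are hypothetical.
 * A typical caller unpublishes an element and uses call_srcu() to defer
 * freeing it until all pre-existing readers of my_srcu have finished:
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	call_srcu(&my_srcu, &old->rcu, foo_reclaim);
 */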

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU-sched read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
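
/*
 * Illustrative blocking-updater sketch, not part of this file's logic;
 * the names my_srcu, my_lock, gp, and struct foo are hypothetical.  The
 * updater replaces the protected pointer, waits for a full SRCU grace
 * period, and only then frees the old version:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */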

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq(&sdp->lock);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0))
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		spin_unlock_irq(&sdp->lock);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
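
/*
 * Illustrative teardown sketch, not part of this file's logic; the names
 * my_srcu and my_exit are hypothetical.  Once the caller has ensured that
 * no further call_srcu() invocations are possible, srcu_barrier() waits
 * for the callbacks already posted, after which cleanup_srcu_struct()
 * can safely release the domain's per-CPU data:
 *
 *	static void __exit my_exit(void)
 *	{
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */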

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq(&sp->gp_lock);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq(&sp->gp_lock);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq(&sdp->lock);
	smp_mb(); /* Old grace periods before callback invocation! */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq(&sdp->lock);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sdp->lock);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq(&sdp->lock);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq(&sdp->lock);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq(&sp->gp_lock);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq(&sp->gp_lock);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);