/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
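
/*
 * A minimal setup sketch.  The names my_srcu and my_init() are
 * hypothetical; init_srcu_struct(), DEFINE_SRCU(), and
 * DEFINE_STATIC_SRCU() are the real SRCU API:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 * Alternatively, DEFINE_SRCU(my_srcu) or DEFINE_STATIC_SRCU(my_srcu)
 * provides compile-time initialization, in which case init_srcu_struct()
 * must not also be invoked on that srcu_struct.
 */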

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
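
/*
 * Readers normally use the srcu_read_lock() and srcu_read_unlock()
 * wrappers rather than calling these functions directly.  A minimal
 * reader sketch, where my_srcu, my_data, struct my_struct, and
 * do_something_with() are hypothetical names:
 *
 *	int idx;
 *	struct my_struct *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// May sleep, unlike plain RCU.
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The index returned by srcu_read_lock() selects the counter rank that
 * was incremented, so it must be passed to the matching srcu_read_unlock().
 */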

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&sp->lock);
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	raw_spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	raw_spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_irq_rcu_node(snp);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		raw_spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				raw_spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	raw_spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		raw_spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle.  */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	raw_spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
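
/*
 * A minimal call_srcu() usage sketch, assuming a hypothetical structure
 * my_obj that embeds an rcu_head, a hypothetical domain my_srcu, and a
 * hypothetical callback my_free_cb():
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		// ... payload ...
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// After unlinking obj from all reader-visible structures:
 *	call_srcu(&my_srcu, &obj->rh, my_free_cb);
 */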

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the raw_spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
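
/*
 * A minimal updater sketch using synchronize_srcu(); my_srcu, my_lock,
 * my_data, oldp, and newp are hypothetical names:
 *
 *	spin_lock(&my_lock);
 *	oldp = rcu_dereference_protected(my_data, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data, newp);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(oldp);
 */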

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		raw_spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
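
/*
 * A typical teardown sketch: first prevent further call_srcu() invocations,
 * then wait for outstanding callbacks with srcu_barrier(), and only then
 * destroy the domain.  my_srcu and my_exit() are hypothetical names:
 *
 *	static void __exit my_exit(void)
 *	{
 *		// ... ensure no new call_srcu() calls can occur ...
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */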

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		raw_spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			raw_spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		raw_spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	raw_spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	raw_spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	raw_spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	raw_spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *counts;

		counts = per_cpu_ptr(sp->sda, cpu);
		u0 = counts->srcu_unlock_count[!idx];
		u1 = counts->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = counts->srcu_lock_count[!idx];
		l1 = counts->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);