/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}
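
/*
 * The four rcu_batch lists in each srcu_struct form a pipeline:
 * callbacks are enqueued on ->batch_queue by call_srcu(), moved to
 * ->batch_check0 by srcu_collect_new(), advanced to ->batch_check1
 * and then to ->batch_done by srcu_advance_batches() as readers
 * drain, and finally invoked from ->batch_done by
 * srcu_invoke_callbacks().
 */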

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
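
/*
 * Illustrative usage sketch; my_srcu, my_data, new_p, and old_p are
 * hypothetical names, not part of this file:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	init_srcu_struct(&my_srcu);
 *
 *	Reader:
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_data, &my_srcu);
 *		... use p ...
 *		srcu_read_unlock(&my_srcu, idx);
 *
 *	Updater:
 *		rcu_assign_pointer(my_data, new_p);
 *		synchronize_srcu(&my_srcu);  (wait for pre-existing readers)
 *		kfree(old_p);
 *
 *	Teardown:
 *		cleanup_srcu_struct(&my_srcu);
 */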

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->completed) & 0x1;
	preempt_disable();
	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	__this_cpu_inc(sp->per_cpu_ref->seq[idx]);
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5

#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete.  Such readers will
 * have used the index specified by "idx".  The caller should ensure
 * that ->completed does not change while this check runs, and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
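
/*
 * Given the trycount values defined above, try_check_zero() makes at most
 * trycount checks with SRCU_RETRY_CHECK_DELAY microseconds between
 * successive checks: the default SYNCHRONIZE_SRCU_TRYCOUNT of 2 allows
 * about 5 microseconds of spinning before giving up, and the expedited
 * trycount of 12 allows about 55 microseconds.
 */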

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
		void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
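
/*
 * Illustrative call_srcu() usage sketch; struct foo, foo_reclaim(),
 * old_fp, and my_srcu are hypothetical names, not part of this file:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		struct foo *fp = container_of(rh, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &old_fp->rh, foo_reclaim);
 */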

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts for both index values to drain to zero.  To avoid
 * starving synchronize_srcu(), first wait for the count for
 * index = ((->completed & 1) ^ 1) to drain to zero, then flip ->completed
 * and wait for the count for the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_expedited()
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
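	/*
	 * A plain synchronize_srcu() suffices here: the callback it queues
	 * (wakeme_after_rcu) goes behind any callbacks already posted by
	 * call_srcu() on this srcu_struct, and callbacks flow through the
	 * batch pipeline and are invoked in order, so by the time it
	 * returns, all previously queued callbacks have been invoked.
	 */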
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

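/*
 * SRCU_CALLBACK_BATCH bounds the number of callbacks invoked per
 * workqueue pass in srcu_invoke_callbacks(), and SRCU_INTERVAL is the
 * requeue delay (in jiffies) used by srcu_reschedule().
 */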
#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have already completed their
	 * first zero check and flip, back when they were enqueued on
	 * ->batch_check0 in a previous invocation of srcu_advance_batches().
	 * (Presumably try_check_zero() returned false during that
	 * invocation, leaving the callbacks stranded on ->batch_check1.)
	 * They are therefore ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their
	 * first zero check and flip, so move them to ->batch_check1
	 * for future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq,
				   &sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);