/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/compiler.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
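
/*
 * Illustrative note (not from the original source): these macros compare
 * free-running counters in a wraparound-safe way.  For example, with
 * a = ULONG_MAX and b = 1, a - b exceeds ULONG_MAX / 2, so
 * ULONG_CMP_LT(a, b) is true: ULONG_MAX is treated as being shortly
 * "before" 1, as desired for grace-period numbers that wrap.
 */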

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
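
/*
 * Usage sketch (illustrative only; struct foo and foo_reclaim() are
 * hypothetical): a typical call_rcu() user embeds an rcu_head in the
 * RCU-protected structure and frees the structure from the callback:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_rcu(&fp->rcu, foo_reclaim);
 */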

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 */
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *rcu));

extern void synchronize_sched(void);

#ifdef CONFIG_PREEMPT_RCU

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
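
/*
 * Usage sketch (illustrative only): the classic update-side pattern
 * unlinks an element, waits for a grace period so that all pre-existing
 * readers are done with it, then frees it:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * Here foo_lock is a hypothetical lock serializing updaters.
 */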

/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
extern void init_rcu_head_on_stack(struct rcu_head *head);
extern void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#ifdef CONFIG_PROVE_RCU
extern int rcu_is_cpu_idle(void);
#else /* !CONFIG_PROVE_RCU */
static inline int rcu_is_cpu_idle(void)
{
	return 0;
}
#endif /* else !CONFIG_PROVE_RCU */

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	WARN_ON_ONCE(rcu_is_cpu_idle());
	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	WARN_ON_ONCE(rcu_is_cpu_idle());
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 */
static inline int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	return lock_is_held(&rcu_lock_map);
}

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode. This way we can
 * advertise an extended quiescent state to other CPUs that started a grace
 * period. Otherwise we would delay any grace period as long as we run in
 * the idle task.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __warned;					\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)
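
/*
 * Usage sketch (illustrative; foo_lock and foo_update() are
 * hypothetical): a function that must be called with a given lock held
 * can document and check that requirement with:
 *
 *	rcu_lockdep_assert(lockdep_is_held(&foo_lock),
 *			   "foo_update() invoked without foo_lock");
 */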

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal context switch in RCU read-side "
			   "critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
				   "Illegal context switch in RCU-bh"	\
				   " read-side critical section");	\
		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
				   "Illegal context switch in RCU-sched"\
				   " read-side critical section");	\
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_check(p, c, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		smp_read_barrier_depends(); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_protected(p, c, space) \
	({ \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(p)); \
	})

#define __rcu_access_index(p, space) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		(_________p1); \
	})
#define __rcu_dereference_index_check(p, c) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, \
				   "suspicious rcu_dereference_index_check()" \
				   " usage"); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
#define __rcu_assign_pointer(p, v, space) \
	({ \
		smp_wmb(); \
		(p) = (typeof(*v) __force space *)(v); \
	})


/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
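
/*
 * Usage sketch (illustrative; gp is a hypothetical __rcu pointer):
 * testing an RCU-protected pointer against NULL without dereferencing
 * it, and thus without needing to be in a read-side critical section:
 *
 *	if (rcu_access_pointer(gp) == NULL)
 *		return -ENOENT;
 */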

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
	__rcu_dereference_index_check((p), (c))

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
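
/*
 * Usage sketch (illustrative; gp and gp_lock are hypothetical): an
 * updater already holding the lock that guards all changes to gp can
 * fetch the pointer without read-side markers or barriers:
 *
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 */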


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
663 664 665 666 667 668 669 670 671 672 673 674 675 676
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_lock_release(&rcu_lock_map);
	__release(RCU);
	__rcu_read_unlock();
}
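
/*
 * Usage sketch (illustrative; gp and do_something_with() are
 * hypothetical): a typical reader brackets its accesses with
 * rcu_read_lock()/rcu_read_unlock() and fetches the pointer once
 * via rcu_dereference():
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */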

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}
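
/*
 * Usage sketch (illustrative; gp and handle() are hypothetical): a
 * reader for data whose grace periods are driven by call_rcu_bh():
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p != NULL)
 *		handle(p);
 *	rcu_read_unlock_bh();
 */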

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-sched critical section
 *
 * See rcu_read_lock_sched for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}
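
/*
 * Usage sketch (illustrative; gp and handle() are hypothetical): a
 * reader for data whose grace periods are driven by call_rcu_sched()
 * or synchronize_sched():
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p != NULL)
 *		handle(p);
 *	rcu_read_unlock_sched();
 */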

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 */
#define rcu_assign_pointer(p, v) \
	__rcu_assign_pointer((p), (v), __rcu)
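
/*
 * Usage sketch (illustrative; struct foo and gp are hypothetical):
 * publishing a newly initialized structure so that concurrent readers
 * are guaranteed to see the initialized fields:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p != NULL) {
 *		p->a = 1;
 *		rcu_assign_pointer(gp, p);
 *	}
 */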

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
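 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer with.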
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  As in the structures
 * will look OK in crash dumps, but any concurrent RCU readers might
 * see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 */
#define RCU_INIT_POINTER(p, v) \
		p = (typeof(*v) __force __rcu *)(v)
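
/*
 * Usage sketch (illustrative; gp is hypothetical): NULLing out a
 * pointer at teardown time needs no ordering, so RCU_INIT_POINTER()
 * suffices:
 *
 *	RCU_INIT_POINTER(gp, NULL);
 */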

static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
{
	return offset < 4096;
}

static __always_inline
void __kfree_rcu(struct rcu_head *head, unsigned long offset)
{
	typedef void (*rcu_callback)(struct rcu_head *);

	BUILD_BUG_ON(!__builtin_constant_p(offset));

	/* See the kfree_rcu() header comment. */
	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));

	kfree_call_rcu(head, (rcu_callback)offset);
}

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 */
#define kfree_rcu(ptr, rcu_head)					\
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
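
/*
 * Usage sketch (illustrative): for the hypothetical struct foo shown
 * above for call_rcu(), whose callback did nothing but kfree() the
 * enclosing structure, the call_rcu()/foo_reclaim() pair reduces to:
 *
 *	kfree_rcu(fp, rcu);
 */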

#endif /* __LINUX_RCUPDATE_H */