/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
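
/*
 * These helpers compare free-running counters in modular arithmetic,
 * so they give the right answer even if the counter has wrapped in
 * between.  A hedged usage sketch (gpnum and snap are hypothetical
 * grace-period numbers, not defined in this header):
 *
 *	if (ULONG_CMP_GE(gpnum, snap))
 *		... gpnum has caught up with the earlier snapshot ...
 */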

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));
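
/*
 * A minimal usage sketch (struct foo, foo_reclaim(), and old_fp are
 * hypothetical, not part of this header):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */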

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 */
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *rcu));

extern void synchronize_sched(void);

#ifdef CONFIG_PREEMPT_RCU

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);

#ifdef CONFIG_RCU_USER_QS
extern void rcu_user_enter(void);
extern void rcu_user_exit(void);
extern void rcu_user_enter_after_irq(void);
extern void rcu_user_exit_after_irq(void);
extern void rcu_user_hooks_switch(struct task_struct *prev,
				  struct task_struct *next);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_enter_after_irq(void) { }
static inline void rcu_user_exit_after_irq(void) { }
#endif /* CONFIG_RCU_USER_QS */

extern void exit_rcu(void);

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter(); \
		do { a; } while (0); \
		rcu_irq_exit(); \
	} while (0)
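
/*
 * For example, a tracepoint in the inner idle loop could be made safe
 * for RCU as follows (trace_foo() is a hypothetical tracepoint):
 *
 *	RCU_NONIDLE(trace_foo(arg));
 */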

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
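
/*
 * The synchronize_*() grace-period waits are built by handing the
 * matching call_rcu*() variant to wait_rcu_gp(); for example, a
 * synchronous sched-RCU wait boils down to roughly:
 *
 *	wait_rcu_gp(call_rcu_sched);
 */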

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
extern void init_rcu_head_on_stack(struct rcu_head *head);
extern void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
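
/*
 * A hedged sketch of on-stack use (my_rcu_cb() and the surrounding
 * wait are hypothetical):
 *
 *	void f(void)
 *	{
 *		struct rcu_head head;
 *
 *		init_rcu_head_on_stack(&head);
 *		call_rcu(&head, my_rcu_cb);
 *		... wait for my_rcu_cb() to be invoked ...
 *		destroy_rcu_head_on_stack(&head);
 *	}
 */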

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
extern int rcu_is_cpu_idle(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return 1;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
static inline int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
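
/*
 * Typical usage is a debug check at the top of a function that must be
 * called by readers (my_lookup() is hypothetical):
 *
 *	struct foo *my_lookup(int key)
 *	{
 *		WARN_ON_ONCE(!rcu_read_lock_held());
 *		...
 *	}
 */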

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode. This way we can
 * notice an extended quiescent state to other CPUs that started a grace
 * period. Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an RCU-sched read lock held if the current
 * CPU is offline.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)
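
/*
 * For example (the condition and message are illustrative only):
 *
 *	rcu_lockdep_assert(rcu_read_lock_held(),
 *			   "my_func() called outside RCU read-side section");
 */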

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
				   "Illegal context switch in RCU-bh"	\
				   " read-side critical section");	\
		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
				   "Illegal context switch in RCU-sched"\
				   " read-side critical section");	\
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_check(p, c, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		smp_read_barrier_depends(); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_protected(p, c, space) \
	({ \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(p)); \
	})

#define __rcu_access_index(p, space) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		(_________p1); \
	})
#define __rcu_dereference_index_check(p, c) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, \
				   "suspicious rcu_dereference_index_check()" \
				   " usage"); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
#define __rcu_assign_pointer(p, v, space) \
	do { \
		smp_wmb(); \
		(p) = (typeof(*v) __force space *)(v); \
	} while (0)


/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
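
/*
 * For example, a NULL test that needs no ordering or protection
 * (gp is a hypothetical __rcu-annotated pointer):
 *
 *	if (!rcu_access_pointer(gp))
 *		return -ENOENT;
 */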

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
	__rcu_dereference_index_check((p), (c))
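
/*
 * For example (cur_idx and array are hypothetical; note that the
 * read-side condition must be supplied explicitly):
 *
 *	i = rcu_dereference_index_check(cur_idx, rcu_read_lock_held());
 *	d = array[i];
 */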

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
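
/*
 * For example (gp and g_lock are hypothetical update-side state):
 *
 *	spin_lock(&g_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&g_lock));
 *	p->a++;
 *	spin_unlock(&g_lock);
 */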


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_bh_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_sched_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock() used illegally while idle");
	rcu_lock_release(&rcu_lock_map);
	__release(RCU);
	__rcu_read_unlock();
}
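
/*
 * A minimal reader sketch (gp and do_something_with() are
 * hypothetical; see rcu_dereference() above):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */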

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 */
#define rcu_assign_pointer(p, v) \
	__rcu_assign_pointer((p), (v), __rcu)
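
/*
 * A minimal updater sketch (struct foo, gp, g_lock, and foo_reclaim()
 * are hypothetical; readers use rcu_dereference(gp)):
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	p->a = 1;
 *	spin_lock(&g_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&g_lock));
 *	rcu_assign_pointer(gp, p);
 *	spin_unlock(&g_lock);
 *	if (old)
 *		call_rcu(&old->rcu, foo_reclaim);
 */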

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  As in the structures
 * will look OK in crash dumps, but any concurrent RCU readers might
 * see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		p = (typeof(*v) __force __rcu *)(v); \
	} while (0)
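
/*
 * For example, case 1 above, NULLing out a pointer, needs no ordering
 * (gp is hypothetical):
 *
 *	RCU_INIT_POINTER(gp, NULL);
 */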

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = (typeof(*v) __force __rcu *)(v)
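
/*
 * For example (struct foo, its gp field, and default_target are
 * hypothetical):
 *
 *	static struct foo my_foo = {
 *		RCU_POINTER_INITIALIZER(gp, &default_target),
 *	};
 */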

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callbacks functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head)					\
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
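
/*
 * For example (struct foo embeds its rcu_head as ->rcu; both the
 * structure and old_fp are hypothetical):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	...
 *	kfree_rcu(old_fp, rcu);
 */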

#endif /* __LINUX_RCUPDATE_H */