/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in each instance of lockdep_map.
 *
 * Currently the main class (subclass == 0) and the single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock at single depth
 * as well as at subclass 0.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
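
/*
 * Usage sketch (names are illustrative, not part of this API): a lock
 * that lives in dynamically allocated memory cannot use its own address
 * as a stable class key, so it is given a static lock_class_key whose
 * address identifies the class:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	spin_lock_init(&my_dev->lock);
 *	lockdep_set_class(&my_dev->lock, &my_dev_lock_key);
 */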

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Whether it's a crosslock.
	 */
	int				cross;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate whether the lock has been accessed
	 * during BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Generation id.
	 *
	 * The value of cross_gen_id is stored here when this lock is held;
	 * cross_gen_id is increased globally whenever a crosslock is held.
	 */
	unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at the commit step.
 */
struct hist_lock {
	/*
	 * Separate stack_trace data. This will be used at the commit step.
	 */
	struct stack_trace	trace;
	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

	/*
	 * Separate hlock instance. This will be used at the commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
	/*
	 * Separate hlock instance. This will be used at the commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

struct lockdep_map_cross {
	struct lockdep_map map;
	struct cross_lock xlock;
};
#endif

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
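
/*
 * Illustrative example (ctx and ctx_lock_key are made-up names): when
 * every instance initialized from one common helper would otherwise
 * share one class that is too broad, a caller can split the class for
 * its own instances:
 *
 *	static struct lock_class_key ctx_lock_key;
 *
 *	mutex_init(&ctx->lock);
 *	lockdep_set_class_and_name(&ctx->lock, &ctx_lock_key, "ctx->lock");
 */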
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
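
/*
 * These annotations are normally issued by the wrapper macros further
 * down (spin_acquire(), mutex_acquire(), ...), but a custom locking
 * primitive can call them directly.  A rough sketch for a hypothetical
 * exclusive lock "foo" (foo, arch_foo_lock/unlock are made up):
 *
 *	void foo_lock(struct foo *f)
 *	{
 *		lock_acquire(&f->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_foo_lock(&f->raw);
 *	}
 *
 *	void foo_unlock(struct foo *f)
 *	{
 *		lock_release(&f->dep_map, 0, _RET_IP_);
 *		arch_foo_unlock(&f->raw);
 *	}
 */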

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
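
/*
 * Pinning documents that a held lock must not be dropped until it is
 * unpinned again, which is useful when handing control to code that is
 * required to keep the lock held.  Sketch (rq->lock is only an example
 * of the intended use):
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lockdep_pin_lock(&rq->lock);
 *	...	call into code that must not release rq->lock	...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */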

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
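
/*
 * Typical use of the assertions above (sketch, my_dev is a made-up
 * name): a function that relies on its caller holding a lock can
 * document and enforce that assumption:
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		...
 *	}
 *
 * With CONFIG_LOCKDEP disabled the assertion expands to a no-op, see
 * the !CONFIG_LOCKDEP definitions below.
 */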

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_PROC,
	XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
				       const char *name,
				       struct lock_class_key *key,
				       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
	{ .map.name = (_name), .map.key = (void *)(_key), \
	  .map.cross = 1, }

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
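
/*
 * Sketch of how a locking primitive is expected to use LOCK_CONTENDED():
 * the trylock is attempted first so that only the contended path is
 * accounted as contention.  A spinlock implementation might do
 * (do_raw_spin_trylock/do_raw_spin_lock stand in for the raw ops):
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *
 * which records lock_contended() only when the trylock fails and
 * lock_acquired() once the lock is finally taken.
 */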

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
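
/*
 * Example (sketch, l1/l2 are illustrative): code that takes two locks of
 * the same class in a fixed order annotates the second acquisition with
 * a nesting subclass so that lockdep does not report a false deadlock:
 *
 *	spin_lock(&l1->lock);
 *	spin_lock_nested(&l2->lock, SINGLE_DEPTH_NESTING);
 */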

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
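
/*
 * lock_map_acquire()/lock_map_release() can also wrap a "virtual" lock:
 * a lockdep_map that is not backed by a real lock but models an ordering
 * constraint (workqueue flushing is the classic user).  Sketch with
 * made-up names:
 *
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_map);
 *
 *	lock_map_acquire(&my_work_map);
 *	...	run the work-like section	...
 *	lock_map_release(&my_work_map);
 */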

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
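
/*
 * might_lock()/might_lock_read() let a function that only takes a lock
 * on some paths declare the potential dependency unconditionally, so
 * lockdep sees it even on runs where the locking path is not hit.
 * Sketch (my_dev and the helpers are made up):
 *
 *	int my_read(struct my_dev *dev, bool blocking)
 *	{
 *		might_lock(&dev->lock);
 *		if (!blocking)
 *			return my_try_read(dev);
 *		...
 *	}
 */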

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */