/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different
 * order, even if it happened for another task, even if those were
 * different locks (but of the same class as this lock), this code will
 * detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
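
/*
 * Example (hypothetical spinlocks "a" and "b", not part of this file):
 *
 *	CPU0:			CPU1:
 *	spin_lock(&a);		spin_lock(&b);
 *	spin_lock(&b);		spin_lock(&a);
 *
 * Once both orderings have been observed - even at different times and
 * in different tasks - the validator reports the circular a -> b -> a
 * dependency, whether or not the two contexts ever actually raced.
 */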
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}
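
/*
 * Sketch of the locking pattern used throughout this file (see e.g.
 * register_lock_class() below): take the graph lock, bail out if the
 * validator has already been turned off, and always pair the unlock:
 *
 *	if (!graph_lock())
 *		return NULL;
 *	... modify the dependency graph ...
 *	graph_unlock();
 */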

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
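
/*
 * Sketch of how the chain key accumulates (hypothetical class indices):
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, 10);	// 1st held lock
 *	chain_key = iterate_chain_key(chain_key, 42);	// 2nd held lock
 *
 * Taking the same classes in the opposite order folds the indices in a
 * different sequence and thus yields a different 64-bit key.
 */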

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
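
/*
 * Usage sketch: lockdep_off()/lockdep_on() bracket code whose locking
 * should be invisible to the validator; the recursion counter makes
 * them safe to nest:
 *
 *	lockdep_off();
 *	...	// lock operations here are not tracked
 *	lockdep_on();
 */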

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}
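
/*
 * The resulting per-state characters, with "bit" being the USED_IN bit
 * of that state (a summary of the logic above):
 *
 *	'.'	neither used in this irq context nor enabled
 *	'+'	irqs of this type were enabled while the lock was held
 *	'-'	the lock was used in this type of irq context
 *	'?'	both of the above - the dangerous combination
 */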

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
	barrier();

	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%p", hlock->instance);
	print_lock_name(lock_classes + class_idx - 1);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p->state == TASK_RUNNING && p != current)
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif
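
/*
 * E.g. a file-scope "static DEFINE_SPINLOCK(foo_lock);" (hypothetical)
 * lies between _stext and _end and is static in the above sense, while
 * a lock embedded in kmalloc()'ed memory is not - the latter needs a
 * static lock_class_key supplied by its init call instead.
 */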

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
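
/*
 * E.g. (hypothetical): two separately-keyed classes both named
 * "&dev->lock" get name_version 1 and 2, and the second is printed as
 * "&dev->lock#2" by __print_lock_name() above.
 */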

static inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("the code is fine but needs lockdep annotation.\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	if (!lock->key) {
		if (!assign_lock_key(lock))
			return NULL;
	} else if (!static_obj(lock->key)) {
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %px: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this, struct list_head *head,
			    unsigned long ip, int distance,
			    struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2:
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previous held lock if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
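
/*
 * FIFO sketch of the helpers above (indices wrap via CQ_MASK; one slot
 * is deliberately left unused so that "full" and "empty" differ):
 *
 *	unsigned long elem;
 *
 *	__cq_init(&lock_cq);
 *	__cq_enqueue(&lock_cq, (unsigned long)entry);	// 0, or -1 if full
 *	__cq_dequeue(&lock_cq, &elem);			// 0, or -1 if empty
 */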

static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		DEBUG_LOCKS_WARN_ON(!irqs_disabled());

		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}
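
/*
 * __bfs() return values, as implemented above:
 *
 *	 1	no match found, *target_entry left untouched
 *	 0	match found and stored in *target_entry
 *	-1	the circular queue overflowed
 *	-2	a corrupted node (NULL ->class) was encountered
 */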

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);

}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(KERN_CONT ":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(KERN_CONT " --> ");
		__print_lock_name(parent);
		printk(KERN_CONT " --> ");
		__print_lock_name(target);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("======================================================\n");
	pr_warn("WARNING: possible circular locking dependency detected\n");
	print_kernel_ident();
	pr_warn("------------------------------------------------------\n");
	pr_warn("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);

	pr_warn("\nbut task is already holding lock:\n");

	print_lock(check_tgt);
	pr_warn("\nwhich lock already depends on the new lock.\n\n");
	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt,
				struct stack_trace *trace)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Breadth-first-search failed, graph got corrupted?
	 */
	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long  count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);

	return ret;
}

static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long  count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

static noinline int
check_redundant(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_redundant_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

1328
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}



/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(KERN_CONT " ops: %lu", class->ops);
	printk(KERN_CONT " {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			len += printk(KERN_CONT " at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: [<%px>] %pS\n",
		depth, "", class->key, class->key);
}

/*
 * printk the shortest lock dependencies from @start to @end in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/*compute depth from generated tree by BFS*/
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}

static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(KERN_CONT " --> ");
		__print_lock_name(middle_class);
		printk(KERN_CONT " --> ");
		__print_lock_name(unsafe_class);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(KERN_CONT ");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(KERN_CONT ");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=====================================================\n");
	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
		irqclass, irqclass);
	print_kernel_ident();
	pr_warn("-----------------------------------------------------\n");
	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	pr_warn("\nand this task is already holding:\n");
	print_lock(prev);
	pr_warn("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	pr_cont(" ->");
	print_lock_name(hlock_class(next));
	pr_cont("\n");

	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
	pr_warn("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	pr_warn("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	pr_warn("\nthe dependencies between the lock to be acquired");
	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}
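
/*
 * Example (following the bit layout above): exclusive_bit() maps
 * LOCK_USED_IN_HARDIRQ_READ to LOCK_ENABLED_HARDIRQ - the direction is
 * flipped and the read bit stripped, since even a read-acquisition in
 * hardirq context conflicts with write-holders that leave hardirqs
 * enabled.
 */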

static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			     struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(prev);
	printk(KERN_CONT ");\n");
	printk("  lock(");
	__print_lock_name(next);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

I
Ingo Molnar 已提交
1736 1737 1738 1739
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
1740
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
I
Ingo Molnar 已提交
1741 1742
		return 0;

1743
	pr_warn("\n");
1744 1745
	pr_warn("============================================\n");
	pr_warn("WARNING: possible recursive locking detected\n");
1746
	print_kernel_ident();
1747
	pr_warn("--------------------------------------------\n");
1748
	pr_warn("%s/%d is trying to acquire lock:\n",
1749
		curr->comm, task_pid_nr(curr));
I
Ingo Molnar 已提交
1750
	print_lock(next);
1751
	pr_warn("\nbut task is already holding lock:\n");
I
Ingo Molnar 已提交
1752 1753
	print_lock(prev);

1754
	pr_warn("\nother info that might help us debug this:\n");
1755
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
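
/*
 * For example, read_lock(&lock); read_lock(&lock); in one task is
 * accepted above (return 2): recursive read locks of the same class
 * cannot deadlock against themselves. Taking a write lock while
 * already holding the same class is reported, unless the two are
 * serialized by a held nest_lock (e.g. via spin_lock_nest_lock()).
 */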

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, struct stack_trace *trace,
	       int (*save)(struct stack_trace *trace))
{
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *entry;
	struct lock_list this;
	int ret;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret)) {
		if (!trace->entries) {
			/*
			 * If @save fails here, the printing might trigger
			 * a WARN but because of the !nr_entries it should
			 * not do bad things.
			 */
			save(trace);
		}
		return print_circular_bug(&this, target_entry, next, prev, trace);
	}
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we dont store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 1;
		}
	}

	/*
	 * Is the <prev> -> <next> link redundant?
	 */
	this.class = hlock_class(prev);
	this.parent = NULL;
	ret = check_redundant(&this, hlock_class(next), &target_entry);
	if (!ret) {
		debug_atomic_inc(nr_redundant);
		return 2;
	}
	if (ret < 0)
		return print_bfs_bug(ret);


	if (!trace->entries && !save(trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, trace);
	if (!ret)
		return 0;

	return 2;
}
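
/*
 * Return convention of check_prev_add(), for reference: 0 means the
 * validation failed (the graph lock was dropped and a report may have
 * been printed); 1 means no new dependency needed recording (recursive
 * reads, or the link already exists); 2 means the link was added or
 * proven redundant. Callers only distinguish zero from non-zero.
 */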

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = 0,
		.entries = NULL,
		.skip = 0,
	};

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;

		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
			if (!ret)
				return 0;

			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}

		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
	WARN_ON(1);

	return 0;
}
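
/*
 * A sketch of the loop above: with a held-lock stack of X, T1, T2
 * (T1/T2 taken with trylock) and a new lock N, the dependencies
 * T2 -> N, T1 -> N and X -> N are added, and the walk stops at X,
 * the first non-trylock entry, because X's own direct dependencies
 * were already recorded when X was acquired.
 */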

unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Returns the index of the first held_lock of the current chain
 */
static inline int get_first_held_lock(struct task_struct *curr,
					struct held_lock *hlock)
{
	int i;
	struct held_lock *hlock_curr;

	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock->irq_context)
			break;

	}

	return ++i;
}
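
/*
 * For instance, if the task holds four locks of which the last two
 * were taken in hardirq context and @hlock is also a hardirq-context
 * lock, the loop above walks back until the irq_context changes and
 * returns 2 - the index of the first held lock of the current chain.
 */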

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Returns the next chain_key iteration
 */
static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
{
	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);

	printk(" class_idx:%d -> chain_key:%016Lx",
		class_idx,
		(unsigned long long)new_chain_key);
	return new_chain_key;
}

static void
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
	struct held_lock *hlock;
	u64 chain_key = 0;
	int depth = curr->lockdep_depth;
	int i;

	printk("depth: %u\n", depth + 1);
	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
		hlock = curr->held_locks + i;
		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);

		print_lock(hlock);
	}

	print_chain_key_iteration(hlock_next->class_idx, chain_key);
	print_lock(hlock_next);
}

static void print_chain_keys_chain(struct lock_chain *chain)
{
	int i;
	u64 chain_key = 0;
	int class_id;

	printk("depth: %u\n", chain->depth);
	for (i = 0; i < chain->depth; i++) {
		class_id = chain_hlocks[chain->base + i];
		chain_key = print_chain_key_iteration(class_id + 1, chain_key);

		print_lock_name(lock_classes + class_id);
		printk("\n");
	}
}

static void print_collision(struct task_struct *curr,
			struct held_lock *hlock_next,
			struct lock_chain *chain)
{
	pr_warn("\n");
	pr_warn("============================\n");
	pr_warn("WARNING: chain_key collision\n");
	print_kernel_ident();
	pr_warn("----------------------------\n");
	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
	pr_warn("Hash chain already cached but the contents don't match!\n");

	pr_warn("Held locks:");
	print_chain_keys_held_locks(curr, hlock_next);

	pr_warn("Locks in cached chain:");
	print_chain_keys_chain(chain);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
#endif

/*
 * Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not it most likely means
 * that there was a collision during the calculation of the chain_key.
 * Returns: 0 not passed, 1 passed
 */
static int check_no_collision(struct task_struct *curr,
			struct held_lock *hlock,
			struct lock_chain *chain)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	int i, j, id;

	i = get_first_held_lock(curr, hlock);

	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
		print_collision(curr, hlock, chain);
		return 0;
	}

	for (j = 0; j < chain->depth - 1; j++, i++) {
		id = curr->held_locks[i].class_idx - 1;

		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
			print_collision(curr, hlock, chain);
			return 0;
		}
	}
#endif
	return 1;
}

/*
 * This is for building a chain between just two different classes,
 * instead of adding a new hlock upon current, which is done by
 * add_chain_cache().
 *
 * This can be called in any context with two classes, while
 * add_chain_cache() must be done within the lock owner's context
 * since it uses hlock which might be racy in another context.
 */
static inline int add_chain_cache_classes(unsigned int prev,
					  unsigned int next,
					  unsigned int irq_context,
					  u64 chain_key)
{
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */

	/*
	 * We might need to take the graph lock, ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock.. for recursion reasons
	 * lockdep won't complain about its own locking errors.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
		dump_stack();
		return 0;
	}

	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	chain->irq_context = irq_context;
	chain->depth = 2;
	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = nr_chain_hlocks;
		nr_chain_hlocks += chain->depth;
		chain_hlocks[chain->base] = prev - 1;
		chain_hlocks[chain->base + 1] = next - 1;
	}
#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * Important for check_no_collision().
	 */
	else {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
		dump_stack();
		return 0;
	}
#endif

	hlist_add_head_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

	return 1;
}

/*
 * Adds a dependency chain into the chain hashtable. Must be called with
 * graph_lock held.
 *
 * Returns 0 on failure, and graph_lock is released.
 * Returns 1 on success, with graph_lock still held.
 */
static inline int add_chain_cache(struct task_struct *curr,
				  struct held_lock *hlock,
				  u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	int i, j;

	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */

	/*
	 * We might need to take the graph lock, ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock.. for recursion reasons
	 * lockdep won't complain about its own locking errors.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
		dump_stack();
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;
	i = get_first_held_lock(curr, hlock);
	chain->depth = curr->lockdep_depth + 1 - i;

	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));

	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = nr_chain_hlocks;
		for (j = 0; j < chain->depth - 1; j++, i++) {
			int lock_id = curr->held_locks[i].class_idx - 1;
			chain_hlocks[chain->base + j] = lock_id;
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
	}

	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
		nr_chain_hlocks += chain->depth;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * Important for check_no_collision().
	 */
	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
		dump_stack();
		return 0;
	}
#endif

	hlist_add_head_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

	return 1;
}

/*
 * Look up a dependency chain.
 */
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	hlist_for_each_entry_rcu(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			debug_atomic_inc(chain_lookup_hits);
			return chain;
		}
	}
	return NULL;
}

/*
 * If the key is not present yet in dependency chain cache then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache_add(struct task_struct *curr,
					 struct held_lock *hlock,
					 u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct lock_chain *chain = lookup_chain_cache(chain_key);

	if (chain) {
cache_hit:
		if (!check_no_collision(curr, hlock, chain))
			return 0;

		if (very_verbose(class)) {
			printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%px] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
		}

		return 0;
	}

	if (very_verbose(class)) {
		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	}

	if (!graph_lock())
		return 0;

	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	chain = lookup_chain_cache(chain_key);
	if (chain) {
		graph_unlock();
		goto cache_hit;
	}

	if (!add_chain_cache(curr, hlock, chain_key))
		return 0;

	return 1;
}
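
/*
 * The typical flow, sketched: validate_chain() below computes the
 * 64-bit chain_key incrementally (see __lock_acquire()) and calls
 * lookup_chain_cache_add(). A cache hit means this exact sequence of
 * lock classes was validated before, so the expensive graph checks
 * can be skipped; a miss adds the chain and returns with graph_lock
 * held so that check_prevs_add() can extend the dependency graph.
 */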

static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
		struct held_lock *hlock, int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache_add() returns 1 it acquires
	 * graph_lock for us)
	 */
	if (!hlock->trylock && hlock->check &&
	    lookup_chain_cache_add(curr, hlock, chain_key)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock.
		 *
		 * any of these scenarios could lead to a deadlock.
		 */
		int ret = check_deadlock(curr, hlock, lock, hlock->read);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2) {
			if (!check_prevs_add(curr, hlock))
				return 0;
		}

		graph_unlock();
	} else {
		/* after lookup_chain_cache_add(): */
		if (unlikely(!debug_locks))
			return 0;
	}

	return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
	       	struct lockdep_map *lock, struct held_lock *hlock,
		int chain_head, u64 chain_key)
{
	return 1;
}
#endif

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			/*
			 * We got mighty confused, our chain keys don't match
			 * with what we expect, did someone trample on our task state?
			 */
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}
		/*
		 * Whoops ran out of static storage again?
		 */
		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
			return;

		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		/*
		 * More smoking hash instead of calculating it, damn see these
		 * numbers float.. I bet that a pink elephant stepped on my memory.
		 */
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}

static void
print_usage_bug_scenario(struct held_lock *lock)
{
	struct lock_class *class = hlock_class(lock);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(class);
	printk(KERN_CONT ");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(class);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("================================\n");
	pr_warn("WARNING: inconsistent lock state\n");
	print_kernel_ident();
	pr_warn("--------------------------------\n");

	pr_warn("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);

	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));
	print_lock(this);

	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	pr_warn("\nother info that might help us debug this:\n");
	print_usage_bug_scenario(this);

	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit);

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr,
			struct lock_list *root, struct lock_list *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	struct lock_list *entry = other;
	struct lock_list *middle = NULL;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("========================================================\n");
	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
	print_kernel_ident();
	pr_warn("--------------------------------------------------------\n");
	pr_warn("%s/%d just changed the state of lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(this);
	if (forwards)
		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
	else
		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
	print_lock_name(other->class);
	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	pr_warn("\nother info that might help us debug this:\n");

	/* Find a middle lock (if one exists) */
	depth = get_lock_depth(other);
	do {
		if (depth == 0 && (entry != root)) {
			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}
		middle = entry;
		entry = get_lock_parent(entry);
		depth--;
	} while (entry && entry != root && (depth >= 0));
	if (forwards)
		print_irq_lock_scenario(root, other,
			middle ? middle->class : root->class, other->class);
	else
		print_irq_lock_scenario(other, root,
			middle ? middle->class : other->class, root->class);

	lockdep_print_held_locks(curr);

	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
	if (!save_trace(&root->trace))
		return 0;
	print_shortest_lock_dependencies(other, root);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_forwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_backwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 0, irqclass);
}

void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
		(void *)curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
		(void *)curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
		(void *)curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
		(void *)curr->softirq_disable_ip);
}

static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & 1;
	int dir = new_bit & 2;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow  recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
	 */
	if ((!read || !dir || STRICT_READ_CHECKS) &&
			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
		return 0;

	/*
	 * Check for read in write conflicts
	 */
	if (!read) {
		if (!valid_state(curr, this, new_bit, excl_bit + 1))
			return 0;

		if (STRICT_READ_CHECKS &&
			!usage(curr, this, excl_bit + 1,
				state_name(new_bit + 1)))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}
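
/*
 * A worked example of the above: for new_bit == LOCK_ENABLED_HARDIRQ_READ
 * we have read == 1 and dir != 0, so the conflicting bit is
 * LOCK_USED_IN_HARDIRQ (direction flipped, read stripped) and 'usage'
 * is check_usage_backwards: nothing reachable backwards from this lock
 * may already be used in hardirq context.
 */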

enum mark_type {
#define LOCKDEP_STATE(__STATE)	__STATE,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
	enum lock_usage_bit usage_bit;
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		usage_bit = 2 + (mark << 2); /* ENABLED */
		if (hlock->read)
			usage_bit += 1; /* READ */

		BUG_ON(usage_bit >= LOCK_USAGE_STATES);

		if (!hlock->check)
			continue;

		if (!mark_lock(curr, hlock, usage_bit))
			return 0;
	}

	return 1;
}
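
/*
 * The usage_bit arithmetic above assumes the enum layout in which each
 * state occupies four consecutive bits ordered USED_IN, USED_IN_READ,
 * ENABLED, ENABLED_READ. For mark == SOFTIRQ, 2 + (mark << 2) then
 * yields LOCK_ENABLED_SOFTIRQ, and the +1 for read-held locks yields
 * LOCK_ENABLED_SOFTIRQ_READ.
 */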

/*
 * Hardirqs will be enabled:
 */
static void __trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;

	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(hardirqs_on_events);
}

void lockdep_hardirqs_on(unsigned long ip)
{
	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (unlikely(current->hardirqs_enabled)) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
		 * in a stat is not a big deal.
		 */
		__debug_atomic_inc(redundant_hardirqs_on);
		return;
	}

	/*
	 * We're enabling irqs and according to our state above irqs weren't
	 * already enabled, yet we find the hardware thinks they are in fact
	 * enabled.. someone messed up their IRQ state tracing.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	/*
	 * See the fine text that goes along with this variable definition.
	 */
	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
		return;

	/*
	 * Can't allow enabling interrupts while in an interrupt handler,
	 * that's general bad form and such. Recursion, limited stack etc..
	 */
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;

	current->lockdep_recursion = 1;
	__trace_hardirqs_on_caller(ip);
	current->lockdep_recursion = 0;
}

/*
 * Hardirqs were disabled:
 */
void lockdep_hardirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * So we're supposed to get called after you mask local IRQs, but for
	 * some reason the hardware doesn't quite think you did a proper job.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(hardirqs_off_events);
	} else
		debug_atomic_inc(redundant_hardirqs_off);
}

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c, avoids
	 * funny state and nesting things.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(redundant_softirqs_on);
		return;
	}

	current->lockdep_recursion = 1;
	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, SOFTIRQ);
	current->lockdep_recursion = 0;
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(softirqs_off_events);
		/*
		 * Whoops, we wanted softirqs off, so why aren't they?
		 */
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(redundant_softirqs_off);
}

static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

	return 1;
}

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 2 * !!task->hardirq_context + !!task->softirq_context;
}
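
/*
 * task_irq_context() thus encodes the context as a small integer:
 * 0 for plain process context, 1 for softirq, 2 for hardirq (3 if a
 * hardirq nests over a softirq). It is stored in held_lock::irq_context
 * so that lock chains from different contexts are never mixed.
 */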

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 0;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
#define LOCKDEP_STATE(__STATE)			\
	case LOCK_USED_IN_##__STATE:		\
	case LOCK_USED_IN_##__STATE##_READ:	\
	case LOCK_ENABLED_##__STATE:		\
	case LOCK_ENABLED_##__STATE##_READ:
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	int i;

	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		lock->class_cache[i] = NULL;

#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif

	/*
	 * Can't be having no nameless bastards around this place!
	 */
	if (DEBUG_LOCKS_WARN_ON(!name)) {
		lock->name = "NULL";
		return;
	}

	lock->name = name;

	/*
	 * No key, no joy, we need to hash something.
	 */
	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
	if (!static_obj(key)) {
		printk("BUG: key %px not in .data!\n", key);
		/*
		 * What it says above ^^^^^, I suggest you read it.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->key = key;

	if (unlikely(!debug_locks))
		return;

	if (subclass) {
		unsigned long flags;

		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
			return;

		raw_local_irq_save(flags);
		current->lockdep_recursion = 1;
		register_lock_class(lock, subclass, 1);
		current->lockdep_recursion = 0;
		raw_local_irq_restore(flags);
	}
}

void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	__lockdep_init_map(lock, name, key, subclass);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
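
/*
 * Typical usage, a sketch (the names are illustrative only):
 *
 *	static struct lock_class_key mylock_key;
 *	lockdep_init_map(&obj->dep_map, "mylock", &mylock_key, 0);
 *
 * The key must live in static storage - static_obj() above enforces
 * this - because its address is what identifies the lock class.
 */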

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);

static int
print_lock_nested_lock_not_held(struct task_struct *curr,
				struct held_lock *hlock,
				unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("==================================\n");
	pr_warn("WARNING: Nested lock was not taken\n");
	print_kernel_ident();
	pr_warn("----------------------------------\n");

	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
	print_lock(hlock);

	pr_warn("\nbut this task is not holding:\n");
	pr_warn("%s\n", hlock->nest_lock->name);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

3260
static int __lock_is_held(const struct lockdep_map *lock, int read);
3261

I
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
3268
			  struct lockdep_map *nest_lock, unsigned long ip,
3269
			  int references, int pin_count)
I
Ingo Molnar 已提交
3270 3271
{
	struct task_struct *curr = current;
3272
	struct lock_class *class = NULL;
I
Ingo Molnar 已提交
3273
	struct held_lock *hlock;
3274
	unsigned int depth;
I
Ingo Molnar 已提交
3275
	int chain_head = 0;
3276
	int class_idx;
I
Ingo Molnar 已提交
3277 3278 3279 3280 3281
	u64 chain_key;

	if (unlikely(!debug_locks))
		return 0;

P
	 * Lockdep should run with IRQs disabled, otherwise we could
	 * get an interrupt which would want to take locks, which would
	 * end up in lockdep and have you got a head-ache already?
	 */
I
		return 0;

3290 3291
	if (!prove_locking || lock->key == &__lockdep_no_validate__)
		check = 0;
3292

3293 3294
	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		class = lock->class_cache[subclass];
3295
	/*
3296
	 * Not cached?
3297
	 */
I
Ingo Molnar 已提交
3298
	if (unlikely(!class)) {
3299
		class = register_lock_class(lock, subclass, 0);
I
Ingo Molnar 已提交
3300 3301 3302
		if (!class)
			return 0;
	}
3303
	atomic_inc((atomic_t *)&class->ops);
I
Ingo Molnar 已提交
3304
	if (very_verbose(class)) {
3305
		printk("\nacquire class [%px] %s", class->key, class->name);
I
Ingo Molnar 已提交
3306
		if (class->name_version > 1)
D
		printk(KERN_CONT "\n");
I
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we dont increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
P
	 * Ran out of static storage for our per-task lock stack again have we?
	 */
I
		return 0;

	class_idx = class - lock_classes + 1;

	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			if (hlock->references) {
				/*
				 * Check: unsigned int references:12, overflow.
				 */
				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
					return 0;

				hlock->references++;
			} else {
				hlock->references = 2;
			}

			return 1;
		}
	}

	hlock = curr->held_locks + depth;
	/*
	 * Plain impossible, we just registered it and checked it weren't no
	 * NULL like.. I bet this mushroom I ate was good!
	 */
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class_idx;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->irq_context = task_irq_context(curr);
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
	hlock->references = references;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = lockstat_clock();
#endif
	hlock->pin_count = pin_count;

	if (check && !mark_irqflags(curr, hlock))
		return 0;

	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is what is the most compact key value to drive
	 * the hash, not class->key.
	 */
	/*
	 * Whoops, we did it again.. ran straight out of our static allocation.
	 */
	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		/*
		 * How can we have a chain hash when we ain't got no keys?!
		 */
		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = 0;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, class_idx);

	if (nest_lock && !__lock_is_held(nest_lock, -1))
		return print_lock_nested_lock_not_held(curr, hlock, ip);

	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
		       curr->lockdep_depth, MAX_LOCK_DEPTH);

		lockdep_print_held_locks(current);
		debug_show_all_locks();
		dump_stack();

		return 0;
	}

	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}
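
/*
 * Chain-key accumulation in __lock_acquire(), sketched: starting from
 * 0 at a chain head, each acquisition folds its class_idx into the
 * running hash, so after taking A and then B the key is
 * iterate_chain_key(iterate_chain_key(0, A_idx), B_idx). The per-depth
 * value saved in hlock->prev_chain_key lets unlock rewind it.
 */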

static int
print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=====================================\n");
	pr_warn("WARNING: bad unlock balance detected!\n");
	print_kernel_ident();
	pr_warn("-------------------------------------\n");
	pr_warn("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(ip);
	pr_warn("but there are no more locks to release!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int match_held_lock(const struct held_lock *hlock,
					const struct lockdep_map *lock)
{
	if (hlock->instance == lock)
		return 1;

	if (hlock->references) {
		const struct lock_class *class = lock->class_cache[0];

		if (!class)
			class = look_up_lock_class(lock, 0);

		/*
		 * If look_up_lock_class() failed to find a class, we're trying
		 * to test if we hold a lock that has never yet been acquired.
		 * Clearly if the lock hasn't been acquired _ever_, we're not
		 * holding it either, so report failure.
		 */
		if (!class)
			return 0;

		/*
		 * References, but not a lock we're actually ref-counting?
		 * State got messed up, follow the sites that change ->references
		 * and try to make sense of it.
		 */
		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
			return 0;

		if (hlock->class_idx == class - lock_classes + 1)
			return 1;
	}

	return 0;
}

/* @depth must not be zero */
static struct held_lock *find_held_lock(struct task_struct *curr,
					struct lockdep_map *lock,
					unsigned int depth, int *idx)
{
	struct held_lock *ret, *hlock, *prev_hlock;
	int i;

	i = depth - 1;
	hlock = curr->held_locks + i;
	ret = hlock;
	if (match_held_lock(hlock, lock))
		goto out;

	ret = NULL;
	for (i--, prev_hlock = hlock--;
	     i >= 0;
	     i--, prev_hlock = hlock--) {
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock->irq_context != hlock->irq_context) {
			ret = NULL;
			break;
		}
		if (match_held_lock(hlock, lock)) {
			ret = hlock;
			break;
		}
	}

out:
	*idx = i;
	return ret;
}

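/*
 * Re-acquire the held locks from index @idx up to @depth - 1 after the
 * top of the held-lock stack has been trimmed by a caller such as
 * __lock_set_class(), __lock_downgrade() or __lock_release().
 * Returns 0 on success and 1 if re-acquiring a lock failed, in which
 * case the callers bail out.
 */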
static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
			      int idx)
{
	struct held_lock *hlock;

	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
		if (!__lock_acquire(hlock->instance,
				    hlock_class(hlock)->subclass,
				    hlock->trylock,
				    hlock->read, hlock->check,
				    hlock->hardirqs_off,
				    hlock->nest_lock, hlock->acquire_ip,
				    hlock->references, hlock->pin_count))
			return 1;
	}
	return 0;
}

static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	lockdep_init_map(lock, name, key, 0);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes + 1;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	if (reacquire_held_locks(curr, depth, i))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}

static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned int depth;
	int i;

	depth = curr->lockdep_depth;
	/*
	 * This function is about downgrading a held lock, yet we're
	 * not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	WARN(hlock->read, "downgrading a read lock");
	hlock->read = 1;
	hlock->acquire_ip = ip;

	if (reacquire_held_locks(curr, depth, i))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()).
 *
 * @nested is an hysterical artifact, needs a tree wide cleanup.
 */
static int
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned int depth;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * So we're all set to release this lock.. wait what lock? We don't
	 * own any locks, you've been drinking again?
	 */
	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
		 return print_unlock_imbalance_bug(curr, lock, ip);

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	if (hlock->instance == lock)
		lock_release_holdtime(hlock);

	WARN(hlock->pin_count, "releasing a pinned lock\n");

	if (hlock->references) {
		hlock->references--;
		if (hlock->references) {
			/*
			 * We had, and after removing one, still have
			 * references, the current lock stack is still
			 * valid. We're done!
			 */
			return 1;
		}
	}

	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	if (reacquire_held_locks(curr, depth, i + 1))
		return 0;

	/*
	 * We had N bottles of beer on the wall, we drank one, but now
	 * there's not N-1 bottles of beer left on the wall...
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
		return 0;

	return 1;
}

static int __lock_is_held(const struct lockdep_map *lock, int read)
{
	struct task_struct *curr = current;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (read == -1 || hlock->read == read)
				return 1;

			return 0;
		}
	}

	return 0;
}

static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return cookie;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16 bits of randomness; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
			cookie.val = 1 + (prandom_u32() >> 16);
			hlock->pin_count += cookie.val;
			return cookie;
		}
	}

	WARN(1, "pinning an unheld lock\n");
	return cookie;
}

static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			hlock->pin_count += cookie.val;
			return;
		}
	}

	WARN(1, "pinning an unheld lock\n");
}

static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
				return;

			hlock->pin_count -= cookie.val;

			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
				hlock->pin_count = 0;

			return;
		}
	}

	WARN(1, "unpinning an unheld lock\n");
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count()) {
			/* like the above, but with softirqs */
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		} else {
			/* lick the above, does it taste good? */
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
		}
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}

void lock_set_class(struct lockdep_map *lock, const char *name,
		    struct lock_class_key *key, unsigned int subclass,
		    unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_set_class(lock, name, key, subclass, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
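
/*
 * Illustrative only (not part of this file): the usual way into
 * lock_set_class() is the lock_set_subclass() wrapper in lockdep.h,
 * e.g. the scheduler re-annotates a still-held runqueue lock once the
 * nested one has been dropped:
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */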

void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_downgrade(lock, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
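
/*
 * Sketch of the typical caller (not part of this file): rwsem's
 * downgrade_write() notifies lockdep of the write -> read transition
 * before doing the actual downgrade:
 *
 *	void downgrade_write(struct rw_semaphore *sem)
 *	{
 *		lock_downgrade(&sem->dep_map, _RET_IP_);
 *		__downgrade_write(sem);
 *	}
 */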

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
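
/*
 * Illustrative expansion (not part of this file): a plain spin_lock()
 * reaches this function via the acquire macros in lockdep.h:
 *
 *	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *	  -> lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *
 * i.e. subclass 0, no trylock, a write (read == 0), full checking and
 * no nest_lock.
 */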

void lock_release(struct lockdep_map *lock, int nested,
			  unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_release(lock, ip);
	if (__lock_release(lock, nested, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
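
/*
 * Illustrative counterpart (not part of this file): spin_unlock() ends
 * up here through the release macro in lockdep.h:
 *
 *	spin_release(&lock->dep_map, 1, _RET_IP_);
 *	  -> lock_release(&lock->dep_map, 1, _RET_IP_);
 *
 * the '1' being the historical @nested argument mentioned above
 * __lock_release().
 */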

int lock_is_held_type(const struct lockdep_map *lock, int read)
{
	unsigned long flags;
	int ret = 0;

	if (unlikely(current->lockdep_recursion))
		return 1; /* avoid false negative lockdep_assert_held() */

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	ret = __lock_is_held(lock, read);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
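
/*
 * Sketch of typical usage (not part of this file): subsystems assert
 * their locking context via the wrappers in lockdep.h, which all boil
 * down to lock_is_held_type():
 *
 *	lockdep_assert_held(&foo->lock);		// read == -1: held at all
 *	lockdep_assert_held_exclusive(&foo->lock);	// read ==  0: held for write
 *	lockdep_assert_held_read(&foo->lock);		// read ==  1: held for read
 *
 * 'foo' is a made-up structure with an embedded lock.
 */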

struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return cookie;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	cookie = __lock_pin_lock(lock);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);

	return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_repin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);

void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_unpin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
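
/*
 * Illustrative use of the pin/unpin API (not part of this file): the
 * scheduler pins rq->lock across code that must not drop it, via the
 * lockdep_pin_lock()/lockdep_unpin_lock() wrappers:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...				// rq->lock must stay held in here
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * Unlocking while pinned trips the "releasing a pinned lock" warning
 * in __lock_release().
 */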

#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=================================\n");
	pr_warn("WARNING: bad contention detected!\n");
	print_kernel_ident();
	pr_warn("---------------------------------\n");
	pr_warn("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(ip);
	pr_warn("but there are no locks held!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	/*
	 * Whee, we contended on this lock, except it seems we're not
	 * actually trying to acquire anything much at all..
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, ip);
		return;
	}

	if (hlock->instance != lock)
		return;

	hlock->waittime_stamp = lockstat_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	if (lock->cpu != smp_processor_id())
		stats->bounces[bounce_contended + !!hlock->read]++;
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now, waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	/*
	 * Yay, we acquired ownership of this lock we didn't try to
	 * acquire, how the heck did that happen?
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, _RET_IP_);
		return;
	}

	if (hlock->instance != lock)
		return;

	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	trace_lock_acquired(lock, ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;

	lock->cpu = cpu;
	lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat || !debug_locks))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat || !debug_locks))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_acquired(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif
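
/*
 * Usage note (not part of this file): with CONFIG_LOCK_STAT=y the
 * statistics gathered above are exported in /proc/lock_stat and can be
 * toggled at runtime:
 *
 *	echo 1 > /proc/sys/kernel/lock_stat	# enable collection
 *	echo 0 > /proc/lock_stat		# clear the statistics
 *
 * See Documentation/locking/lockstat.txt for the full recipe.
 */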

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	hlist_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);

	RCU_INIT_POINTER(class->key, NULL);
	RCU_INIT_POINTER(class->name, NULL);
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed, and possibly re-used by other modules.
 *
 * We will have had one sync_sched() before getting here, so we're guaranteed
 * nobody will look up these exact classes -- they're properly dead but still
 * allocated.
 */
void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	unsigned long flags;
	int i;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
				zap_class(class);
		}
	}

	if (locked)
		graph_unlock();
	raw_local_irq_restore(flags);

	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 *
	 * sync_sched() is sufficient because the read-side sections run
	 * with IRQs disabled.
	 */
	synchronize_sched();

	/*
	 * XXX at this point we could return the resources to the pool;
	 * instead we leak them. We would need to change to bitmap allocators
	 * instead of the linear allocators we have now.
	 */
}
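
/*
 * Illustrative call site (not part of this file): module unload uses
 * this to drop classes whose keys lived in the module image, roughly:
 *
 *	lockdep_free_key_range(mod->core_layout.base,
 *			       mod->core_layout.size);
 */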

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	unsigned long flags;
	int i, j;
	int locked;

	raw_local_irq_save(flags);

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			int match = 0;

			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				match |= class == lock->class_cache[j];

			if (unlikely(match)) {
				if (debug_locks_off_graph_unlock()) {
					/*
					 * We all just reset everything, how did it match?
					 */
					WARN_ON(1);
				}
				goto out_restore;
			}
		}
	}
	if (locked)
		graph_unlock();

out_restore:
	raw_local_irq_restore(flags);
}

void __init lockdep_init(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		sizeof(struct list_head) * CLASSHASH_SIZE +
		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
		sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(struct circular_queue)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	raw_local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
					sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
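
/*
 * Illustrative caller (not part of this file): the page allocator runs
 * this check before handing memory back, e.g. free_pages_prepare() does
 *
 *	debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
 *
 * so freeing pages that still contain a held lock gets reported.
 */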

static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
	       current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
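
/*
 * Illustrative caller (not part of this file): do_exit() invokes
 *
 *	debug_check_no_locks_held();
 *
 * so a task exiting with locks still held is flagged.
 */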

#ifdef __KERNEL__
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (!p->lockdep_depth)
			continue;
		lockdep_print_held_locks(p);
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
	}
	rcu_read_unlock();

	pr_warn("\n");
	pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
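
/*
 * Illustrative trigger (not part of this file): SysRq-d ends up here,
 * via sysrq_handle_showlocks() in drivers/tty/sysrq.c.
 */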
#endif

/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);

asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}

void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: !rcu_is_watching()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it.  RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way we can
	 * notice an extended quiescent state to other CPUs that started a grace
	 * period. Otherwise we would delay any grace period as long as we run
	 * in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
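
/*
 * Sketch of the usual entry point (not part of this file): the
 * RCU_LOCKDEP_WARN() macro in rcupdate.h funnels into this function,
 * e.g.
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *			 "suspicious rcu_dereference_check() usage");
 */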