/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
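
/*
 * For reference, the count values used throughout this file are:
 *	 1 - unlocked
 *	 0 - locked, no waiters
 *	<0 - locked, possible waiters queued on ->wait_list
 * Note that MUTEX_SHOW_NO_WAITER() only reflects the count at the time of
 * the atomic_read() and can therefore race with a new waiter queueing up.
 */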

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->mcs_lock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
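
/*
 * Usage sketch (illustrative only; 'example_lock' and 'struct example_dev'
 * are hypothetical names). A mutex is either defined statically or
 * initialized at run time before first use:
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */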

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
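
/*
 * Usage sketch (illustrative only; 'example_lock' and 'example_list' are
 * hypothetical names). mutex_lock() may sleep, so this is only valid in
 * process context, and the same task must do the unlock:
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static LIST_HEAD(example_list);
 *
 *	static void example_add(struct list_head *entry)
 *	{
 *		mutex_lock(&example_lock);
 *		list_add(entry, &example_list);
 *		mutex_unlock(&example_lock);
 *	}
 */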
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that check fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
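
/*
 * Wait/wound usage sketch (illustrative only; 'example_ww_class' and
 * example_lock_pair() are hypothetical). All locks taken under one acquire
 * context share a stamp, which __mutex_lock_check_stamp() below compares to
 * decide which task backs off. A caller that gets -EDEADLK must release
 * every ww_mutex it holds in that context before blocking on the contended
 * one again (typically via ww_mutex_lock_slow(); see
 * Documentation/ww-mutex-design.txt). On success the caller later unlocks
 * both mutexes and then calls ww_acquire_fini():
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
 *				     struct ww_acquire_ctx *ctx)
 *	{
 *		int ret;
 *
 *		ww_acquire_init(ctx, &example_ww_class);
 *		ret = ww_mutex_lock(a, ctx);
 *		if (ret)
 *			goto out_fini;
 *		ret = ww_mutex_lock(b, ctx);
 *		if (ret) {
 *			ww_mutex_unlock(a);
 *			goto out_fini;
 *		}
 *		ww_acquire_done(ctx);
 *		return 0;
 *
 *	out_fini:
 *		ww_acquire_fini(ctx);
 *		return ret;
 *	}
 */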

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * after acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire the wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct mcs_spinlock node;	/* queue node for the optimistic-spin MCS lock */
#endif

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	mcs_spin_lock(&lock->mcs_lock, &node);
	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined; only
			 * by acquiring the wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mcs_spin_unlock(&lock->mcs_lock, &node);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we would live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
	mcs_spin_unlock(&lock->mcs_lock, &node);
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
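
/*
 * Usage sketch (illustrative only; the parent/child objects are
 * hypothetical). mutex_lock_nested() tells lockdep that nesting two locks
 * of the same class is intentional; without CONFIG_DEBUG_LOCK_ALLOC it maps
 * to a plain mutex_lock():
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */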

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
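
/*
 * Usage sketch (illustrative only; 'example_lock' is a hypothetical name).
 * The return value must be checked: 0 means the lock is now held, -EINTR
 * means a signal arrived and the lock was not taken:
 *
 *	ret = mutex_lock_interruptible(&example_lock);
 *	if (ret)
 *		return ret;
 *	...
 *	mutex_unlock(&example_lock);
 */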

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
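
/*
 * Usage note: mutex_lock_killable() behaves like the interruptible variant
 * above, except that only a fatal signal (one that is going to kill the
 * task) aborts the wait, which suits callers that do not want to handle
 * restarts for ordinary signals.
 */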

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
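
/*
 * Usage sketch (illustrative only; 'example_lock' is a hypothetical name).
 * Note the spin_trylock()-style convention documented above: a nonzero
 * return means the lock was acquired:
 *
 *	if (mutex_trylock(&example_lock)) {
 *		... do the work that needs the lock ...
 *		mutex_unlock(&example_lock);
 *	} else {
 *		... fall back, lock not held ...
 *	}
 */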

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
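
/*
 * Usage sketch (illustrative only; the object, list and lock names are
 * hypothetical). This is the usual "drop the last reference under a lock"
 * pattern: the mutex is only taken when the count might actually hit zero:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */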