/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

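/*
 * Illustrative usage sketch (not part of the original file): a mutex is
 * normally declared with DEFINE_MUTEX() or set up with mutex_init(), which
 * expands to __mutex_init() above. The struct and field names below
 * ("my_driver_data", "lock") are hypothetical.
 *
 *	static DEFINE_MUTEX(my_global_lock);
 *
 *	struct my_driver_data {
 *		struct mutex lock;
 *	};
 *
 *	static int my_driver_probe(struct my_driver_data *d)
 *	{
 *		mutex_init(&d->lock);	// dynamic init, sets up lockdep class
 *		return 0;
 *	}
 */
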
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

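/*
 * Illustrative example (not part of the original file): if the owning
 * task_struct sits at 0xffff888012345600, an owner word of
 * 0xffff888012345601 encodes that task as owner with MUTEX_FLAG_WAITERS
 * set; __owner_task() masks the low MUTEX_FLAGS bits back off and
 * __owner_flags() extracts just those bits.
 */
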
/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit; we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() call provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

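/*
 * Illustrative handoff sketch (not part of the original file): assuming the
 * owner word currently encodes the unlocking task with MUTEX_FLAG_WAITERS and
 * MUTEX_FLAG_HANDOFF set, __mutex_handoff(lock, top_waiter) rewrites it to
 * top_waiter | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP; the woken waiter's
 * __mutex_trylock() then clears PICKUP when it claims the lock.
 */
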
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
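/*
 * Illustrative usage sketch (not part of the original file); the data
 * structure and field names are hypothetical:
 *
 *	struct my_counter {
 *		struct mutex lock;
 *		long value;
 *	};
 *
 *	static void my_counter_add(struct my_counter *c, long delta)
 *	{
 *		mutex_lock(&c->lock);	// may sleep; not for interrupt context
 *		c->value += delta;
 *		mutex_unlock(&c->lock);	// must be called by the same task
 *	}
 */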
#endif

static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}

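/*
 * Worked example (not part of the original file): stamps are assigned from an
 * increasing counter, so a context that started acquiring earlier has the
 * smaller stamp and "wins". With a->stamp == 5 and b->stamp == 9,
 * __ww_ctx_stamp_after(b, a) is true, meaning b is the younger context and the
 * one expected to back off on conflict; the pointer comparison only breaks
 * ties between equal stamps.
 */
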
/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context.
 *
 * Due to the invariants on the wait list, this can only affect the first
 * waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}

		break;
	}
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contended slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	unsigned long flags;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring the lock in the slowpath, set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);

			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 *
			 * Check this in every inner iteration because we may
			 * be racing against another thread's ww_mutex_lock.
			 */
			if (READ_ONCE(ww->ctx)) {
				ret = false;
				break;
			}
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * As with lock holder preemption, we also skip spinning if the task
	 * is not on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, const bool waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
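/*
 * Illustrative wait/wound usage sketch (not part of the original file, and
 * deliberately simplified to a single back-off; real users retry in a loop as
 * described in Documentation/locking/ww-mutex-design.txt). The ww_class name
 * and the objects being locked are hypothetical.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret) {
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *		if (ret == -EDEADLK) {
 *			// we are the younger context: back off, then
 *			// sleep on the contended lock before retrying
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			ret = ww_mutex_lock(&a->lock, &ctx);
 *			if (ret)
 *				ww_mutex_unlock(&b->lock);
 *		}
 *	}
 *	if (!ret) {
 *		// ... both locks held ...
 *		ww_mutex_unlock(&b->lock);
 *		ww_mutex_unlock(&a->lock);
 *	}
 *	ww_acquire_fini(&ctx);
 */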

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back
		 * off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	DEFINE_WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF: in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

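/*
 * Illustrative usage sketch (not part of the original file): interruptible
 * locking is typically used on paths entered from a syscall, where a pending
 * signal should abort the wait rather than block the task uninterruptibly.
 * The "dev->lock" name below is hypothetical.
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;	// e.g. an ioctl or read path
 *	...
 *	mutex_unlock(&dev->lock);
 */
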
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
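/*
 * Illustrative usage sketch (not part of the original file): mutex_trylock()
 * suits paths that must not sleep, or where contention should simply skip the
 * optional work. Note the spin_trylock()-style return convention. The names
 * "cache" and "shrink_cache" are hypothetical.
 *
 *	if (mutex_trylock(&cache->lock)) {
 *		shrink_cache(cache);		// do optional work
 *		mutex_unlock(&cache->lock);
 *	}
 */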

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
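/*
 * Illustrative usage sketch (not part of the original file): the classic use
 * is dropping a reference where the final put must run teardown under a lock.
 * The "obj" structure and "obj_list_lock" below are hypothetical.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);		// teardown under the lock
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */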