/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
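
/*
 * Initialization sketch (illustrative only; 'cache_lock' and 'dyn_lock' are
 * hypothetical caller-side names, not part of this file). A mutex is either
 * defined statically or set up at runtime via the mutex_init() wrapper that
 * ends up calling __mutex_init():
 *
 *	static DEFINE_MUTEX(cache_lock);
 *
 *	struct mutex dyn_lock;
 *	mutex_init(&dyn_lock);
 *
 * memset()-ing a live mutex to 0 is not allowed; see the mutex_lock()
 * documentation below.
 */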

/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03
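
/*
 * Decoding sketch (illustrative): with a waiter queued, lock->owner holds
 * the owning task pointer with MUTEX_FLAG_WAITERS OR-ed into its low bits;
 * __owner_task() below masks the flag bits off to recover the task pointer
 * and __owner_flags() keeps only the flag bits.
 */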

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff; only set it in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}

			return false;
		}

		/*
		 * If we set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		if (handoff)
			flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
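
/*
 * Critical-section sketch (illustrative only; 'shared_lock' and
 * 'shared_count' are hypothetical caller-side names):
 *
 *	static DEFINE_MUTEX(shared_lock);
 *	static unsigned long shared_count;
 *
 *	mutex_lock(&shared_lock);
 *	shared_count++;
 *	mutex_unlock(&shared_lock);
 *
 * Because the owner may sleep, none of this may be used from interrupt
 * context, and the task that locked the mutex must be the one to unlock it.
 */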
#endif

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	struct task_struct *task = current;

	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner) {
			if (waiter && owner == task) {
				smp_mb(); /* ACQUIRE */
				break;
			}

			if (!mutex_spin_on_owner(lock, owner))
				goto fail_unlock;
		}

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, waiter))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
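
/*
 * Wait/wound usage sketch (illustrative only; 'buf_class' and 'obj' are
 * hypothetical caller-side names). Every lock taken under an acquire
 * context must be released before the context is finished:
 *
 *	static DEFINE_WW_CLASS(buf_class);
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &buf_class);
 *	ret = ww_mutex_lock(&obj->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		...	back off: unlock everything already held under
 *			'ctx', then retry, typically starting with
 *			ww_mutex_lock_slow() on the contended lock
 *	}
 *	...
 *	ww_mutex_unlock(&obj->lock);
 *	ww_acquire_fini(&ctx);
 */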

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
		     __mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
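
/*
 * Annotation sketch (illustrative only; 'parent' and 'child' are
 * hypothetical objects whose mutexes share a lock class). The subclass
 * tells lockdep that this parent -> child nesting is intentional:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */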

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
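
/*
 * Sketch of interruptible locking (illustrative only; 'dev' is a
 * hypothetical driver structure containing a mutex):
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 *
 * Returning -ERESTARTSYS is a caller-side convention; this function itself
 * returns -EINTR when a signal interrupts the sleep.
 */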

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
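
/*
 * Trylock convention sketch (illustrative only; 'stats_lock' and
 * update_stats() are hypothetical caller-side names). Unlike
 * down_trylock(), a non-zero return means the lock WAS taken:
 *
 *	if (mutex_trylock(&stats_lock)) {
 *		update_stats();
 *		mutex_unlock(&stats_lock);
 *	}
 */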

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
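
/*
 * Refcount-drop sketch (illustrative only; 'obj', 'obj_lock' and free_obj()
 * are hypothetical caller-side names):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_lock)) {
 *		free_obj(obj);
 *		mutex_unlock(&obj_lock);
 *	}
 *
 * When it returns 1 the last reference is gone and the lock is held, so the
 * object can be torn down before dropping the lock.
 */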