/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
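
/*
 * Illustrative usage (a sketch, not part of this file; 'example_dev' is
 * hypothetical): a mutex is either defined statically with DEFINE_MUTEX()
 * or set up at runtime through the mutex_init() wrapper, which supplies
 * the name and lock class key for lockdep before __mutex_init() runs:
 *
 *	static DEFINE_MUTEX(example_static_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */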

/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
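
/*
 * Worked example (illustrative only; the address is made up): if the
 * owning task's task_struct sits at 0xffff880012345600, an owner word of
 * 0xffff880012345601 encodes "owned by that task, wait_list non-empty":
 * __owner_task() masks off the low MUTEX_FLAGS bits to recover the task
 * pointer, while __owner_flags() returns MUTEX_FLAG_WAITERS.
 */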

/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff, only set that in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}

			return false;
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		if (handoff)
			flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
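
/*
 * Illustrative usage (a sketch; 'example_lock' and 'example_state' are
 * hypothetical): the canonical pattern brackets the critical section
 * with mutex_lock()/mutex_unlock() in the same task:
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_state;
 *
 *	static void example_update(int v)
 *	{
 *		mutex_lock(&example_lock);
 *		example_state = v;
 *		mutex_unlock(&example_lock);
 *	}
 */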

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, false)) {
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
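
/*
 * Illustrative usage (a sketch following Documentation/locking/ww-mutex-design.txt;
 * 'example_ww_class' and the two-lock helper are hypothetical): w/w mutexes
 * are taken under an acquire context, and -EDEADLK from ww_mutex_lock()
 * means this context lost the ordering and must release what it holds and
 * retake the contended lock with ww_mutex_lock_slow(). A full implementation
 * loops until no -EDEADLK is returned:
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static void example_lock_both(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *		ww_mutex_lock(a, &ctx);
 *		if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			ww_mutex_lock(a, &ctx);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... use both objects, then unlock both ...
 *
 *		ww_acquire_fini(&ctx);
 *	}
 */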

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;
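
	/*
	 * Worked example (illustrative): stamps come from a monotonically
	 * increasing counter, so with ctx->stamp == 13 and hold_ctx->stamp
	 * == 11 the difference is 2 <= LONG_MAX, marking @ctx as the
	 * younger context: it must back off with -EDEADLK. An older context
	 * (smaller stamp) sees a huge unsigned difference and may wait.
	 */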

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx) {
			struct ww_mutex *ww;
			ww = container_of(lock, struct ww_mutex, base);

			ww_mutex_set_context_fastpath(ww, ww_ctx);
		}
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
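
/*
 * Illustrative usage (a sketch; 'parent' and 'child' are hypothetical):
 * when two locks of the same class must legitimately nest, the subclass
 * tells lockdep about the nesting level, e.g. with SINGLE_DEPTH_NESTING:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */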

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
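
/*
 * Illustrative usage (a sketch; 'example_lock' is hypothetical): callers
 * must check the return value, since a pending signal aborts the wait:
 *
 *	if (mutex_lock_interruptible(&example_lock))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	mutex_unlock(&example_lock);
 */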

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
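
/*
 * Illustrative usage (a sketch; 'example_lock' is hypothetical): note the
 * spin_trylock()-style return convention, 1 on success and 0 on failure:
 *
 *	if (mutex_trylock(&example_lock)) {
 *		... critical section ...
 *		mutex_unlock(&example_lock);
 *	} else {
 *		... fall back without blocking ...
 *	}
 */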

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
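
/*
 * Illustrative usage (a sketch; 'obj' and 'example_obj_teardown' are
 * hypothetical): the typical caller drops a reference and, only when it
 * was the last one, tears the object down while holding the lock:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
 *		example_obj_teardown(obj);
 *		mutex_unlock(&obj->lock);
 *	}
 */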