/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define  __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
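/*
 * Illustrative initialization sketch (not part of this file; 'my_dev' and
 * 'global_lock' are hypothetical). A mutex is either defined statically or
 * initialized at run time via mutex_init() before first use; memset()-ing
 * it to 0 is not a valid initialization:
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *d)
 *	{
 *		mutex_init(&d->lock);
 *	}
 */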

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
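/*
 * Illustrative usage sketch (not part of this file; 'my_counter' and
 * my_counter_inc() are hypothetical). It only shows the basic lock/unlock
 * pairing around shared data, in sleepable process context:
 *
 *	struct my_counter {
 *		struct mutex lock;
 *		unsigned long value;
 *	};
 *
 *	static void my_counter_inc(struct my_counter *c)
 *	{
 *		mutex_lock(&c->lock);
 *		c->value++;
 *		mutex_unlock(&c->lock);
 *	}
 */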
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changes, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner too early;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner too early;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
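/*
 * Illustrative wait/wound usage sketch (not part of this file; 'my_class',
 * lock_both() and its ww_mutex arguments are hypothetical). It shows one
 * -EDEADLK backoff round as driven by __mutex_lock_check_stamp(); the first
 * lock taken under a context cannot return -EDEADLK, and a real caller would
 * also unlock both mutexes and call ww_acquire_fini() when done:
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	static int lock_both(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *
 *		ww_mutex_lock(a, &ctx);
 *		ret = ww_mutex_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			ret = ww_mutex_lock(a, &ctx);
 *		}
 *		if (!ret)
 *			ww_acquire_done(&ctx);
 *		return ret;
 *	}
 */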

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire the wait_lock, add
	 * themselves to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that the lock owner
	 * is currently running on a (different) CPU and while we don't
	 * need to reschedule. The rationale is that if the lock owner is
	 * running, it is likely to release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	if (!osq_lock(&lock->osq))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring the wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (!mutex_is_locked(lock) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, this will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&lock->osq);
slowpath:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
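/*
 * Illustrative lockdep-annotation sketch (not part of this file; the
 * 'parent'/'child' objects are hypothetical). When two mutexes of the same
 * lock class must be held at once, the inner acquisition needs a distinct
 * subclass so lockdep does not flag it as recursive locking:
 *
 *	static void lock_pair(struct mutex *parent, struct mutex *child)
 *	{
 *		mutex_lock(parent);
 *		mutex_lock_nested(child, SINGLE_DEPTH_NESTING);
 *
 *		...
 *
 *		mutex_unlock(child);
 *		mutex_unlock(parent);
 *	}
 */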

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret =  __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
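/*
 * Illustrative usage sketch (not part of this file; 'dev->lock' is
 * hypothetical): a syscall path that wants to stay responsive to signals
 * while waiting for the lock:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *
 *	...
 *
 *	mutex_unlock(&dev->lock);
 */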

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
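/*
 * Illustrative usage sketch (not part of this file; 'stats_lock' and
 * update_stats() are hypothetical): opportunistically update shared state,
 * skipping the update instead of sleeping when the lock is contended. Note
 * the return value convention is the inverse of down_trylock():
 *
 *	if (mutex_trylock(&stats_lock)) {
 *		update_stats();
 *		mutex_unlock(&stats_lock);
 *	}
 */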

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
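/*
 * Illustrative usage sketch (not part of this file; 'my_obj', its 'refcnt'
 * and 'obj_list_lock' are hypothetical): drop a reference and, only when it
 * reaches zero, tear the object down while holding the list lock:
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */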