/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
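
/*
 * Usage sketch (hypothetical names; compiled out): a mutex is normally
 * set up statically with DEFINE_MUTEX() or dynamically through the
 * mutex_init() wrapper, which supplies the name and lock class key
 * rather than the caller invoking __mutex_init() directly.
 */
#if 0
static DEFINE_MUTEX(example_static_lock);	/* static definition */

struct example_dev {
	struct mutex io_lock;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->io_lock);		/* dynamic initialization */
}
#endif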

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the
 * kernel memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
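
/*
 * Usage sketch (hypothetical names; compiled out): the common pairing
 * of mutex_lock()/mutex_unlock() around a critical section. The task
 * that takes the mutex must be the one to release it, and it may
 * sleep while holding it.
 */
#if 0
static DEFINE_MUTEX(example_lock);
static int example_count;

static void example_increment(void)
{
	mutex_lock(&example_lock);
	example_count++;		/* critical section, exclusive access */
	mutex_unlock(&example_lock);
}
#endif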

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct thread_info *owner;

		/*
		 * If we own the BKL, then don't spin. The owner of
		 * the mutex might be waiting on us to release the BKL.
		 */
		if (unlikely(current->lock_depth >= 0))
			break;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
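
/*
 * Usage sketch (hypothetical types and names; compiled out): taking
 * two locks of the same lock class in a fixed order is legitimate,
 * but lockdep must be told via the subclass argument, e.g. with
 * SINGLE_DEPTH_NESTING for a parent/child pair.
 */
#if 0
struct example_obj {
	struct mutex lock;
};

static void example_lock_pair(struct example_obj *parent,
			      struct example_obj *child)
{
	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
	/* ... work on both objects ... */
	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}
#endif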

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case; others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock() and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock, this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
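
/*
 * Usage sketch (hypothetical names; compiled out): callers must check
 * the return value and back out on -EINTR, typically translating it
 * to -ERESTARTSYS so the syscall can be restarted after the signal.
 */
#if 0
static DEFINE_MUTEX(example_io_lock);

static int example_write(void)
{
	if (mutex_lock_interruptible(&example_io_lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */
	/* ... critical section ... */
	mutex_unlock(&example_io_lock);
	return 0;
}
#endif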

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
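
/*
 * Usage sketch (hypothetical names; compiled out): since
 * mutex_trylock() follows the spin_trylock() convention, a zero
 * return means the lock is contended and the caller must not enter
 * the critical section.
 */
#if 0
static DEFINE_MUTEX(example_poll_lock);

static void example_poll(void)
{
	if (!mutex_trylock(&example_poll_lock))
		return;		/* contended: skip and try again later */
	/* ... short, optional work ... */
	mutex_unlock(&example_poll_lock);
}
#endif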

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return 1 and hold the mutex if we decremented to 0; return 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
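
/*
 * Usage sketch (hypothetical names; compiled out): the classic caller
 * of atomic_dec_and_mutex_lock() is a release path that must tear
 * down shared state under a mutex only when the last reference goes
 * away. (list_del() and kfree() assume <linux/list.h> and
 * <linux/slab.h>.)
 */
#if 0
struct example_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_MUTEX(example_list_lock);

static void example_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock))
		return;			/* not the last reference */
	list_del(&obj->node);		/* list is protected by the mutex */
	mutex_unlock(&example_list_lock);
	kfree(obj);
}
#endif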