futex.c 76.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
11 12 13 14
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
15 16 17 18
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
E
Eric Dumazet 已提交
19 20 21
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
22 23 24 25
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
L
Linus Torvalds 已提交
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
57
#include <linux/signal.h>
58
#include <linux/export.h>
59
#include <linux/magic.h>
60 61
#include <linux/pid.h>
#include <linux/nsproxy.h>
62
#include <linux/ptrace.h>
63
#include <linux/sched/rt.h>
64
#include <linux/hugetlb.h>
C
Colin Cross 已提交
65
#include <linux/freezer.h>
66
#include <linux/bootmem.h>
67

68
#include <asm/futex.h>
L
Linus Torvalds 已提交
69

70
#include "locking/rtmutex_common.h"
71

72 73 74 75 76 77
/*
 * Basic futex operation and ordering guarantees:
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
78 79 80
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
81 82
 *
 * The waker side modifies the user space value of the futex and calls
83 84 85
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
86
 *
87 88 89 90 91
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and simply return. In order for this
 * optimization to work, ordering guarantees must exist so that the waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
113 114 115 116 117
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
118 119 120
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
121 122 123 124 125 126 127 128 129 130 131 132
 *
 *   waiters++;
 *   mb(); (A) <-- paired with -.
 *                              |
 *   lock(hash_bucket(futex));  |
 *                              |
 *   uval = *futex;             |
 *                              |        *futex = newval;
 *                              |        sys_futex(WAKE, futex);
 *                              |          futex_wake(futex);
 *                              |
 *                              `------->  mb(); (B)
133
 *   if (uval == val)
134
 *     queue();
135
 *     unlock(hash_bucket(futex));
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *                                           wake_waiters(futex);
 *                                           unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read -- this
 * is guaranteed by the head counter in the hb spinlock; and where (B)
 * orders the write to futex and the waiters read -- this is done by the
 * barriers in get_futex_key_refs(), through either ihold or atomic_inc,
 * depending on the futex type.
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
158 159
 */

160
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
161
int __read_mostly futex_cmpxchg_enabled;
162
#endif
163

164 165 166 167 168 169 170 171
/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

193 194
/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
195
 * @list:		priority-sorted list of tasks waiting on this futex
196 197 198 199 200 201 202 203 204
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
L
Linus Torvalds 已提交
205 206 207
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
P
Pierre Peiffer 已提交
208
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
209
 * The order of wakeup is always to make the first condition true, then
210 211 212 213
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
L
Linus Torvalds 已提交
214 215
 */
struct futex_q {
P
Pierre Peiffer 已提交
216
	struct plist_node list;
L
Linus Torvalds 已提交
217

218
	struct task_struct *task;
L
Linus Torvalds 已提交
219 220
	spinlock_t *lock_ptr;
	union futex_key key;
221
	struct futex_pi_state *pi_state;
222
	struct rt_mutex_waiter *rt_waiter;
223
	union futex_key *requeue_pi_key;
224
	u32 bitset;
L
Linus Torvalds 已提交
225 226
};

227 228 229 230 231 232
static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

L
Linus Torvalds 已提交
233
/*
D
Darren Hart 已提交
234 235 236
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
L
Linus Torvalds 已提交
237 238
 */
struct futex_hash_bucket {
239
	atomic_t waiters;
P
Pierre Peiffer 已提交
240 241
	spinlock_t lock;
	struct plist_head chain;
242
} ____cacheline_aligned_in_smp;
L
Linus Torvalds 已提交
243

244 245 246
static unsigned long __read_mostly futex_hashsize;

static struct futex_hash_bucket *futex_queues;
L
Linus Torvalds 已提交
247

248 249 250 251 252 253 254 255 256 257 258
static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as full barrier (B), see the ordering comment above.
	 */
	smp_mb__after_atomic_inc();
}

259 260 261 262
/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
263 264
{
#ifdef CONFIG_SMP
265
	atomic_inc(&hb->waiters);
266
	/*
267
	 * Full barrier (A), see the ordering comment above.
268
	 */
269 270 271 272 273 274 275 276 277 278 279 280 281 282
	smp_mb__after_atomic_inc();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}
283

284 285 286 287
static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
288
#else
289
	return 1;
290 291 292
#endif
}

L
Linus Torvalds 已提交
293 294 295 296 297 298 299 300
/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
301
	return &futex_queues[hash & (futex_hashsize - 1)];
L
Linus Torvalds 已提交
302 303 304 305 306 307 308
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
309 310
	return (key1 && key2
		&& key1->both.word == key2->both.word
L
Linus Torvalds 已提交
311 312 313 314
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

315 316 317 318 319 320 321 322 323 324 325 326
/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
327
		ihold(key->shared.inode); /* implies MB (B) */
328 329
		break;
	case FUT_OFF_MMSHARED:
330
		futex_get_mm(key); /* implies MB (B) */
331 332 333 334 335 336 337 338 339 340
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
341 342 343
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
344
		return;
345
	}
346 347 348 349 350 351 352 353 354 355 356

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

E
Eric Dumazet 已提交
357
/**
358 359 360 361
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
362 363
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
E
Eric Dumazet 已提交
364
 *
365 366
 * Return: a negative error code or 0
 *
E
Eric Dumazet 已提交
367
 * The key words are stored in *key on success.
L
Linus Torvalds 已提交
368
 *
A
Al Viro 已提交
369
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
L
Linus Torvalds 已提交
370 371 372
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
D
Darren Hart 已提交
373
 * lock_page() might sleep, the caller should not hold a spinlock.
L
Linus Torvalds 已提交
374
 */
375
static int
376
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
L
Linus Torvalds 已提交
377
{
378
	unsigned long address = (unsigned long)uaddr;
L
Linus Torvalds 已提交
379
	struct mm_struct *mm = current->mm;
380
	struct page *page, *page_head;
381
	int err, ro = 0;
L
Linus Torvalds 已提交
382 383 384 385

	/*
	 * The futex address must be "naturally" aligned.
	 */
386
	key->both.offset = address % PAGE_SIZE;
E
Eric Dumazet 已提交
387
	if (unlikely((address % sizeof(u32)) != 0))
L
Linus Torvalds 已提交
388
		return -EINVAL;
389
	address -= key->both.offset;
L
Linus Torvalds 已提交
390

391 392 393
	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

E
Eric Dumazet 已提交
394 395 396 397 398 399 400 401 402 403
	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
404
		get_futex_key_refs(key);  /* implies MB (B) */
E
Eric Dumazet 已提交
405 406
		return 0;
	}
L
Linus Torvalds 已提交
407

408
again:
409
	err = get_user_pages_fast(address, 1, 1, &page);
410 411 412 413 414 415 416 417
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
418 419
	if (err < 0)
		return err;
420 421
	else
		err = 0;
422

423 424 425
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page_head = page;
	if (unlikely(PageTail(page))) {
426
		put_page(page);
427 428
		/* serialize against __split_huge_page_splitting() */
		local_irq_disable();
429
		if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459
			page_head = compound_head(page);
			/*
			 * page_head is valid pointer but we must pin
			 * it before taking the PG_lock and/or
			 * PG_compound_lock. The moment we re-enable
			 * irqs __split_huge_page_splitting() can
			 * return and the head page can be freed from
			 * under us. We can't take the PG_lock and/or
			 * PG_compound_lock on a page that could be
			 * freed from under us.
			 */
			if (page != page_head) {
				get_page(page_head);
				put_page(page);
			}
			local_irq_enable();
		} else {
			local_irq_enable();
			goto again;
		}
	}
#else
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);
		put_page(page);
	}
#endif

	lock_page(page_head);
460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475

	/*
	 * If page_head->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page_head->mapping.
	 */
476
	if (!page_head->mapping) {
477
		int shmem_swizzled = PageSwapCache(page_head);
478 479
		unlock_page(page_head);
		put_page(page_head);
480 481 482
		if (shmem_swizzled)
			goto again;
		return -EFAULT;
483
	}
L
Linus Torvalds 已提交
484 485 486 487 488 489

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
490
	 * the object not the particular process.
L
Linus Torvalds 已提交
491
	 */
492
	if (PageAnon(page_head)) {
493 494 495 496 497 498 499 500 501
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (ro) {
			err = -EFAULT;
			goto out;
		}

502
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
L
Linus Torvalds 已提交
503
		key->private.mm = mm;
504
		key->private.address = address;
505 506
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
507
		key->shared.inode = page_head->mapping->host;
508
		key->shared.pgoff = basepage_index(page);
L
Linus Torvalds 已提交
509 510
	}

511
	get_futex_key_refs(key); /* implies MB (B) */
L
Linus Torvalds 已提交
512

513
out:
514 515
	unlock_page(page_head);
	put_page(page_head);
516
	return err;
L
Linus Torvalds 已提交
517 518
}

519
static inline void put_futex_key(union futex_key *key)
L
Linus Torvalds 已提交
520
{
521
	drop_futex_key_refs(key);
L
Linus Torvalds 已提交
522 523
}

524 525
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
526 527 528 529 530
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
531
 * We have no generic implementation of a non-destructive write to the
532 533 534 535 536 537
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
538 539 540 541
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
542 543
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
544 545
	up_read(&mm->mmap_sem);

546 547 548
	return ret < 0 ? ret : 0;
}

549 550
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
551 552
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
553 554 555 556 557 558 559 560 561 562 563 564 565 566 567
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

568 569
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
T
Thomas Gleixner 已提交
570
{
571
	int ret;
T
Thomas Gleixner 已提交
572 573

	pagefault_disable();
574
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
T
Thomas Gleixner 已提交
575 576
	pagefault_enable();

577
	return ret;
T
Thomas Gleixner 已提交
578 579 580
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
L
Linus Torvalds 已提交
581 582 583
{
	int ret;

584
	pagefault_disable();
585
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
586
	pagefault_enable();
L
Linus Torvalds 已提交
587 588 589 590

	return ret ? -EFAULT : 0;
}

591 592 593 594 595 596 597 598 599 600 601

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

602
	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
603 604 605 606 607 608 609 610

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
611
	pi_state->key = FUTEX_KEY_INIT;
612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
638
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
639
		list_del_init(&pi_state->list);
640
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We dont trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

667
	rcu_read_lock();
668
	p = find_task_by_vpid(pid);
669 670
	if (p)
		get_task_struct(p);
671

672
	rcu_read_unlock();
673 674 675 676 677 678 679 680 681 682 683 684 685

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
686
	struct futex_hash_bucket *hb;
687
	union futex_key key = FUTEX_KEY_INIT;
688

689 690
	if (!futex_cmpxchg_enabled)
		return;
691 692 693
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
694
	 * versus waiters unqueueing themselves:
695
	 */
696
	raw_spin_lock_irq(&curr->pi_lock);
697 698 699 700 701
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
702
		hb = hash_futex(&key);
703
		raw_spin_unlock_irq(&curr->pi_lock);
704 705 706

		spin_lock(&hb->lock);

707
		raw_spin_lock_irq(&curr->pi_lock);
708 709 710 711
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
712 713 714 715 716 717
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
718 719
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
720
		pi_state->owner = NULL;
721
		raw_spin_unlock_irq(&curr->pi_lock);
722 723 724 725 726

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

727
		raw_spin_lock_irq(&curr->pi_lock);
728
	}
729
	raw_spin_unlock_irq(&curr->pi_lock);
730 731 732
}

static int
P
Pierre Peiffer 已提交
733 734
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
735 736 737 738
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct task_struct *p;
739
	pid_t pid = uval & FUTEX_TID_MASK;
740

J
Jason Low 已提交
741
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
P
Pierre Peiffer 已提交
742
		if (match_futex(&this->key, key)) {
743 744 745 746 747
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
748
			/*
749
			 * Userspace might have messed up non-PI and PI futexes
750 751 752 753
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

754
			WARN_ON(!atomic_read(&pi_state->refcount));
755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}
774

775
			atomic_inc(&pi_state->refcount);
P
Pierre Peiffer 已提交
776
			*ps = pi_state;
777 778 779 780 781 782

			return 0;
		}
	}

	/*
783
	 * We are the first waiter - try to look up the real owner and attach
784
	 * the new pi_state to it, but bail out when TID = 0
785
	 */
786
	if (!pid)
787
		return -ESRCH;
788
	p = futex_find_get_task(pid);
789 790
	if (!p)
		return -ESRCH;
791 792 793 794 795 796 797

	/*
	 * We need to look at the task state flags to figure out,
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
798
	raw_spin_lock_irq(&p->pi_lock);
799 800 801 802 803 804 805 806
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

807
		raw_spin_unlock_irq(&p->pi_lock);
808 809 810
		put_task_struct(p);
		return ret;
	}
811 812 813 814 815 816 817 818 819 820

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
P
Pierre Peiffer 已提交
821
	pi_state->key = *key;
822

823
	WARN_ON(!list_empty(&pi_state->list));
824 825
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
826
	raw_spin_unlock_irq(&p->pi_lock);
827 828 829

	put_task_struct(p);

P
Pierre Peiffer 已提交
830
	*ps = pi_state;
831 832 833 834

	return 0;
}

835
/**
836
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
837 838 839 840 841 842 843 844
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
845
 *
846 847 848
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
849 850 851 852 853 854 855
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
856
				struct task_struct *task, int set_waiters)
857
{
858
	int lock_taken, ret, force_take = 0;
859
	u32 uval, newval, curval, vpid = task_pid_vnr(task);
860 861 862 863 864 865 866 867 868

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
869
	newval = vpid;
870 871
	if (set_waiters)
		newval |= FUTEX_WAITERS;
872

873
	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
874 875 876 877 878
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
879
	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
897
	 * Should we force take the futex? See below.
898
	 */
899 900 901 902 903
	if (unlikely(force_take)) {
		/*
		 * Keep the OWNER_DIED and the WAITERS bit and set the
		 * new TID value.
		 */
904
		newval = (curval & ~FUTEX_TID_MASK) | vpid;
905
		force_take = 0;
906 907 908
		lock_taken = 1;
	}

909
	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
910 911 912 913 914
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
915
	 * We took the lock due to forced take over.
916 917 918 919 920 921 922 923 924 925 926 927 928 929
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We dont have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
930 931 932 933 934 935 936 937
			 * We failed to find an owner for this
			 * futex. So we have no pi_state to block
			 * on. This can happen in two cases:
			 *
			 * 1) The owner died
			 * 2) A stale FUTEX_WAITERS bit
			 *
			 * Re-read the futex value.
938 939 940 941 942
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
943 944 945
			 * If the owner died or we have a stale
			 * WAITERS bit the owner TID in the user space
			 * futex is 0.
946
			 */
947 948
			if (!(curval & FUTEX_TID_MASK)) {
				force_take = 1;
949 950 951 952 953 954 955 956 957 958
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

959 960 961 962 963 964 965 966 967 968
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

969 970
	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
971 972 973 974
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
975
	hb_waiters_dec(hb);
976 977
}

L
Linus Torvalds 已提交
978 979 980 981 982 983
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
T
Thomas Gleixner 已提交
984 985
	struct task_struct *p = q->task;

986 987 988
	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

L
Linus Torvalds 已提交
989
	/*
T
Thomas Gleixner 已提交
990
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
991 992
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
T
Thomas Gleixner 已提交
993 994
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
L
Linus Torvalds 已提交
995
	 */
T
Thomas Gleixner 已提交
996 997
	get_task_struct(p);

998
	__unqueue_futex(q);
L
Linus Torvalds 已提交
999
	/*
T
Thomas Gleixner 已提交
1000 1001 1002 1003
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
L
Linus Torvalds 已提交
1004
	 */
1005
	smp_wmb();
L
Linus Torvalds 已提交
1006
	q->lock_ptr = NULL;
T
Thomas Gleixner 已提交
1007 1008 1009

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
L
Linus Torvalds 已提交
1010 1011
}

1012 1013 1014 1015
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
1016
	u32 uninitialized_var(curval), newval;
1017 1018 1019 1020

	if (!pi_state)
		return -EINVAL;

1021 1022 1023 1024 1025 1026 1027
	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

1028
	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
1029 1030 1031
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
1032 1033 1034
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
1035 1036 1037 1038 1039 1040 1041 1042 1043
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
1044
	if (!(uval & FUTEX_OWNER_DIED)) {
1045 1046
		int ret = 0;

1047
		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1048

1049
		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1050
			ret = -EFAULT;
1051
		else if (curval != uval)
1052 1053
			ret = -EINVAL;
		if (ret) {
1054
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1055 1056
			return ret;
		}
1057
	}
1058

1059
	raw_spin_lock_irq(&pi_state->owner->pi_lock);
1060 1061
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
1062
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1063

1064
	raw_spin_lock_irq(&new_owner->pi_lock);
1065
	WARN_ON(!list_empty(&pi_state->list));
1066 1067
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
1068
	raw_spin_unlock_irq(&new_owner->pi_lock);
1069

1070
	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1071 1072 1073 1074 1075 1076 1077
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
1078
	u32 uninitialized_var(oldval);
1079 1080 1081 1082 1083

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit has not to be preserved here. We are the owner:
	 */
1084 1085
	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
		return -EFAULT;
1086 1087 1088 1089 1090 1091
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

I
Ingo Molnar 已提交
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

D
Darren Hart 已提交
1108 1109 1110
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
1111
	spin_unlock(&hb1->lock);
1112 1113
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
D
Darren Hart 已提交
1114 1115
}

L
Linus Torvalds 已提交
1116
/*
D
Darren Hart 已提交
1117
 * Wake up waiters matching bitset queued on this futex (uaddr).
L
Linus Torvalds 已提交
1118
 */
1119 1120
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
L
Linus Torvalds 已提交
1121
{
1122
	struct futex_hash_bucket *hb;
L
Linus Torvalds 已提交
1123
	struct futex_q *this, *next;
1124
	union futex_key key = FUTEX_KEY_INIT;
L
Linus Torvalds 已提交
1125 1126
	int ret;

1127 1128 1129
	if (!bitset)
		return -EINVAL;

1130
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
L
Linus Torvalds 已提交
1131 1132 1133
	if (unlikely(ret != 0))
		goto out;

1134
	hb = hash_futex(&key);
1135 1136 1137 1138 1139

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

1140
	spin_lock(&hb->lock);
L
Linus Torvalds 已提交
1141

J
Jason Low 已提交
1142
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
L
Linus Torvalds 已提交
1143
		if (match_futex (&this->key, &key)) {
1144
			if (this->pi_state || this->rt_waiter) {
1145 1146 1147
				ret = -EINVAL;
				break;
			}
1148 1149 1150 1151 1152

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

L
Linus Torvalds 已提交
1153 1154 1155 1156 1157 1158
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

1159
	spin_unlock(&hb->lock);
1160
out_put_key:
1161
	put_futex_key(&key);
1162
out:
L
Linus Torvalds 已提交
1163 1164 1165
	return ret;
}

1166 1167 1168 1169
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
1170
static int
1171
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1172
	      int nr_wake, int nr_wake2, int op)
1173
{
1174
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1175
	struct futex_hash_bucket *hb1, *hb2;
1176
	struct futex_q *this, *next;
D
Darren Hart 已提交
1177
	int ret, op_ret;
1178

D
Darren Hart 已提交
1179
retry:
1180
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1181 1182
	if (unlikely(ret != 0))
		goto out;
1183
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1184
	if (unlikely(ret != 0))
1185
		goto out_put_key1;
1186

1187 1188
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
1189

D
Darren Hart 已提交
1190
retry_private:
T
Thomas Gleixner 已提交
1191
	double_lock_hb(hb1, hb2);
1192
	op_ret = futex_atomic_op_inuser(op, uaddr2);
1193 1194
	if (unlikely(op_ret < 0)) {

D
Darren Hart 已提交
1195
		double_unlock_hb(hb1, hb2);
1196

1197
#ifndef CONFIG_MMU
1198 1199 1200 1201
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
1202
		ret = op_ret;
1203
		goto out_put_keys;
1204 1205
#endif

1206 1207
		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
1208
			goto out_put_keys;
1209 1210
		}

1211
		ret = fault_in_user_writeable(uaddr2);
1212
		if (ret)
1213
			goto out_put_keys;
1214

1215
		if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
1216 1217
			goto retry_private;

1218 1219
		put_futex_key(&key2);
		put_futex_key(&key1);
D
Darren Hart 已提交
1220
		goto retry;
1221 1222
	}

J
Jason Low 已提交
1223
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1224
		if (match_futex (&this->key, &key1)) {
1225 1226 1227 1228
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
1229 1230 1231 1232 1233 1234 1235 1236
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
J
Jason Low 已提交
1237
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1238
			if (match_futex (&this->key, &key2)) {
1239 1240 1241 1242
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
1243 1244 1245 1246 1247 1248 1249 1250
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

1251
out_unlock:
D
Darren Hart 已提交
1252
	double_unlock_hb(hb1, hb2);
1253
out_put_keys:
1254
	put_futex_key(&key2);
1255
out_put_key1:
1256
	put_futex_key(&key1);
1257
out:
1258 1259 1260
	return ret;
}

D
Darren Hart 已提交
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
1279
		hb_waiters_dec(hb1);
D
Darren Hart 已提交
1280
		plist_add(&q->list, &hb2->chain);
1281
		hb_waiters_inc(hb2);
D
Darren Hart 已提交
1282 1283 1284 1285 1286 1287
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

1288 1289
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1290 1291 1292
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
1293 1294 1295 1296 1297
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1298 1299 1300
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
1301 1302
 */
static inline
1303 1304
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
1305 1306 1307 1308
{
	get_futex_key_refs(key);
	q->key = *key;

1309
	__unqueue_futex(q);
1310 1311 1312 1313

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

1314 1315
	q->lock_ptr = &hb->lock;

T
Thomas Gleixner 已提交
1316
	wake_up_state(q->task, TASK_NORMAL);
1317 1318 1319 1320
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1321 1322 1323 1324 1325 1326 1327
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1328 1329
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1330 1331 1332
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
1333
 *
1334 1335 1336
 * Return:
 *  0 - failed to acquire the lock atomically;
 *  1 - acquired the lock;
1337 1338 1339 1340 1341 1342
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
1343
				 struct futex_pi_state **ps, int set_waiters)
1344
{
1345
	struct futex_q *top_waiter = NULL;
1346 1347 1348 1349 1350 1351
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

1352 1353 1354 1355 1356 1357 1358 1359
	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
1360 1361 1362 1363 1364 1365
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

1366 1367 1368 1369
	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

1370
	/*
1371 1372 1373
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
1374
	 */
1375 1376
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
1377
	if (ret == 1)
1378
		requeue_pi_wake_futex(top_waiter, key2, hb2);
1379 1380 1381 1382 1383 1384

	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1385
 * @uaddr1:	source futex user address
1386
 * @flags:	futex flags (FLAGS_SHARED, etc.)
1387 1388 1389 1390 1391
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1392
 *		pi futex (pi to pi requeue is not supported)
1393 1394 1395 1396
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
1397 1398
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
1399
 *  <0 - on error
L
Linus Torvalds 已提交
1400
 */
1401 1402 1403
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
L
Linus Torvalds 已提交
1404
{
1405
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1406 1407
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
1408
	struct futex_hash_bucket *hb1, *hb2;
L
Linus Torvalds 已提交
1409
	struct futex_q *this, *next;
1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}
L
Linus Torvalds 已提交
1432

1433
retry:
1434 1435 1436 1437 1438 1439 1440 1441 1442
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

1443
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
L
Linus Torvalds 已提交
1444 1445
	if (unlikely(ret != 0))
		goto out;
1446 1447
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
L
Linus Torvalds 已提交
1448
	if (unlikely(ret != 0))
1449
		goto out_put_key1;
L
Linus Torvalds 已提交
1450

1451 1452
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
L
Linus Torvalds 已提交
1453

D
Darren Hart 已提交
1454
retry_private:
1455
	hb_waiters_inc(hb2);
I
Ingo Molnar 已提交
1456
	double_lock_hb(hb1, hb2);
L
Linus Torvalds 已提交
1457

1458 1459
	if (likely(cmpval != NULL)) {
		u32 curval;
L
Linus Torvalds 已提交
1460

1461
		ret = get_futex_value_locked(&curval, uaddr1);
L
Linus Torvalds 已提交
1462 1463

		if (unlikely(ret)) {
D
Darren Hart 已提交
1464
			double_unlock_hb(hb1, hb2);
1465
			hb_waiters_dec(hb2);
L
Linus Torvalds 已提交
1466

1467
			ret = get_user(curval, uaddr1);
D
Darren Hart 已提交
1468 1469
			if (ret)
				goto out_put_keys;
L
Linus Torvalds 已提交
1470

1471
			if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
1472
				goto retry_private;
L
Linus Torvalds 已提交
1473

1474 1475
			put_futex_key(&key2);
			put_futex_key(&key1);
D
Darren Hart 已提交
1476
			goto retry;
L
Linus Torvalds 已提交
1477
		}
1478
		if (curval != *cmpval) {
L
Linus Torvalds 已提交
1479 1480 1481 1482 1483
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

1484
	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1485 1486 1487 1488 1489 1490
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
1491
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1492
						 &key2, &pi_state, nr_requeue);
1493 1494 1495 1496 1497 1498 1499 1500 1501

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it.
		 */
		if (ret == 1) {
			WARN_ON(pi_state);
1502
			drop_count++;
1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
			task_count++;
			ret = get_futex_value_locked(&curval2, uaddr2);
			if (!ret)
				ret = lookup_pi_state(curval2, hb2, &key2,
						      &pi_state);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
1515
			hb_waiters_dec(hb2);
1516 1517
			put_futex_key(&key2);
			put_futex_key(&key1);
1518
			ret = fault_in_user_writeable(uaddr2);
1519 1520 1521 1522 1523 1524
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
1525
			hb_waiters_dec(hb2);
1526 1527
			put_futex_key(&key2);
			put_futex_key(&key1);
1528 1529 1530 1531 1532 1533 1534
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

J
Jason Low 已提交
1535
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1536 1537 1538 1539
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
L
Linus Torvalds 已提交
1540
			continue;
1541

1542 1543 1544
		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
1545 1546 1547
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
1548 1549
		 */
		if ((requeue_pi && !this->rt_waiter) ||
1550 1551
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
1552 1553 1554
			ret = -EINVAL;
			break;
		}
1555 1556 1557 1558 1559 1560 1561

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
L
Linus Torvalds 已提交
1562
			wake_futex(this);
1563 1564
			continue;
		}
L
Linus Torvalds 已提交
1565

1566 1567 1568 1569 1570 1571
		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584
		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
1585
				requeue_pi_wake_futex(this, &key2, hb2);
1586
				drop_count++;
1587 1588 1589 1590 1591 1592 1593
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
L
Linus Torvalds 已提交
1594
		}
1595 1596
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
L
Linus Torvalds 已提交
1597 1598 1599
	}

out_unlock:
D
Darren Hart 已提交
1600
	double_unlock_hb(hb1, hb2);
1601
	hb_waiters_dec(hb2);
L
Linus Torvalds 已提交
1602

1603 1604 1605 1606 1607 1608
	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
L
Linus Torvalds 已提交
1609
	while (--drop_count >= 0)
1610
		drop_futex_key_refs(&key1);
L
Linus Torvalds 已提交
1611

1612
out_put_keys:
1613
	put_futex_key(&key2);
1614
out_put_key1:
1615
	put_futex_key(&key1);
1616
out:
1617 1618 1619
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
L
Linus Torvalds 已提交
1620 1621 1622
}

/* The key must be already stored in q->key. */
E
Eric Sesterhenn 已提交
1623
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1624
	__acquires(&hb->lock)
L
Linus Torvalds 已提交
1625
{
1626
	struct futex_hash_bucket *hb;
L
Linus Torvalds 已提交
1627

1628
	hb = hash_futex(&q->key);
1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

1640
	q->lock_ptr = &hb->lock;
L
Linus Torvalds 已提交
1641

1642
	spin_lock(&hb->lock); /* implies MB (A) */
1643
	return hb;
L
Linus Torvalds 已提交
1644 1645
}

1646
static inline void
J
Jason Low 已提交
1647
queue_unlock(struct futex_hash_bucket *hb)
1648
	__releases(&hb->lock)
1649 1650
{
	spin_unlock(&hb->lock);
1651
	hb_waiters_dec(hb);
1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
E
Eric Sesterhenn 已提交
1666
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1667
	__releases(&hb->lock)
L
Linus Torvalds 已提交
1668
{
P
Pierre Peiffer 已提交
1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
1683
	q->task = current;
1684
	spin_unlock(&hb->lock);
L
Linus Torvalds 已提交
1685 1686
}

1687 1688 1689 1690 1691 1692 1693
/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
1694 1695
 * Return:
 *   1 - if the futex_q was still queued (and we removed unqueued it);
1696
 *   0 - if the futex_q was already removed by the waking thread
L
Linus Torvalds 已提交
1697 1698 1699 1700
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
1701
	int ret = 0;
L
Linus Torvalds 已提交
1702 1703

	/* In the common case we don't take the spinlock, which is nice. */
1704
retry:
L
Linus Torvalds 已提交
1705
	lock_ptr = q->lock_ptr;
1706
	barrier();
1707
	if (lock_ptr != NULL) {
L
Linus Torvalds 已提交
1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
1726
		__unqueue_futex(q);
1727 1728 1729

		BUG_ON(q->pi_state);

L
Linus Torvalds 已提交
1730 1731 1732 1733
		spin_unlock(lock_ptr);
		ret = 1;
	}

1734
	drop_futex_key_refs(&q->key);
L
Linus Torvalds 已提交
1735 1736 1737
	return ret;
}

1738 1739
/*
 * PI futexes can not be requeued and must remove themself from the
P
Pierre Peiffer 已提交
1740 1741
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
1742
 */
P
Pierre Peiffer 已提交
1743
static void unqueue_me_pi(struct futex_q *q)
1744
	__releases(q->lock_ptr)
1745
{
1746
	__unqueue_futex(q);
1747 1748 1749 1750 1751

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

P
Pierre Peiffer 已提交
1752
	spin_unlock(q->lock_ptr);
1753 1754
}

P
Pierre Peiffer 已提交
1755
/*
1756
 * Fixup the pi_state owner with the new owner.
P
Pierre Peiffer 已提交
1757
 *
1758 1759
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
P
Pierre Peiffer 已提交
1760
 */
1761
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1762
				struct task_struct *newowner)
P
Pierre Peiffer 已提交
1763
{
1764
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
P
Pierre Peiffer 已提交
1765
	struct futex_pi_state *pi_state = q->pi_state;
1766
	struct task_struct *oldowner = pi_state->owner;
1767
	u32 uval, uninitialized_var(curval), newval;
D
Darren Hart 已提交
1768
	int ret;
P
Pierre Peiffer 已提交
1769 1770

	/* Owner died? */
1771 1772 1773 1774 1775
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
1776 1777 1778 1779
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
1780
	 *
D
Darren Hart 已提交
1781 1782 1783
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

1798
		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1799 1800 1801 1802 1803 1804 1805 1806 1807 1808
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
P
Pierre Peiffer 已提交
1809
	if (pi_state->owner != NULL) {
1810
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
P
Pierre Peiffer 已提交
1811 1812
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
1813
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1814
	}
P
Pierre Peiffer 已提交
1815

1816
	pi_state->owner = newowner;
P
Pierre Peiffer 已提交
1817

1818
	raw_spin_lock_irq(&newowner->pi_lock);
P
Pierre Peiffer 已提交
1819
	WARN_ON(!list_empty(&pi_state->list));
1820
	list_add(&pi_state->list, &newowner->pi_state_list);
1821
	raw_spin_unlock_irq(&newowner->pi_lock);
1822
	return 0;
P
Pierre Peiffer 已提交
1823 1824

	/*
1825
	 * To handle the page fault we need to drop the hash bucket
1826 1827
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
1828 1829 1830 1831 1832
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
P
Pierre Peiffer 已提交
1833
	 */
1834 1835
handle_fault:
	spin_unlock(q->lock_ptr);
1836

1837
	ret = fault_in_user_writeable(uaddr);
1838

1839
	spin_lock(q->lock_ptr);
1840

1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
P
Pierre Peiffer 已提交
1851 1852
}

N
Nick Piggin 已提交
1853
static long futex_wait_restart(struct restart_block *restart);
T
Thomas Gleixner 已提交
1854

1855 1856 1857 1858 1859 1860 1861 1862 1863 1864
/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
1865 1866 1867
 * Return:
 *  1 - success, lock taken;
 *  0 - success, lock not taken;
1868 1869
 * <0 - on error (-EFAULT)
 */
1870
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1871 1872 1873 1874 1875 1876 1877 1878 1879 1880
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
1881
			ret = fixup_pi_state_owner(uaddr, q, current);
1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
1903
		 * rt_mutex. Too late.
1904
		 */
1905
		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1906
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1907 1908 1909
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1910
		ret = fixup_pi_state_owner(uaddr, q, owner);
1911 1912 1913 1914 1915
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
1916
	 * the owner of the rt_mutex.
1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

1928 1929 1930 1931 1932 1933 1934
/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
T
Thomas Gleixner 已提交
1935
				struct hrtimer_sleeper *timeout)
1936
{
1937 1938 1939 1940 1941 1942
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
T
Thomas Gleixner 已提交
1943
	set_current_state(TASK_INTERRUPTIBLE);
1944
	queue_me(q, hb);
1945 1946 1947 1948 1949 1950 1951 1952 1953

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
1954 1955
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
1956 1957 1958 1959 1960 1961 1962 1963
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
C
Colin Cross 已提交
1964
			freezable_schedule();
1965 1966 1967 1968
	}
	__set_current_state(TASK_RUNNING);
}

1969 1970 1971 1972
/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
1973
 * @flags:	futex flags (FLAGS_SHARED, etc.)
1974 1975 1976 1977 1978 1979 1980 1981
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
1982 1983
 * Return:
 *  0 - uaddr contains val and hb has been locked;
1984
 * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1985
 */
1986
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1987
			   struct futex_q *q, struct futex_hash_bucket **hb)
L
Linus Torvalds 已提交
1988
{
1989 1990
	u32 uval;
	int ret;
L
Linus Torvalds 已提交
1991 1992

	/*
D
Darren Hart 已提交
1993
	 * Access the page AFTER the hash-bucket is locked.
L
Linus Torvalds 已提交
1994 1995 1996 1997 1998 1999 2000
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
2001 2002
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
L
Linus Torvalds 已提交
2003 2004
	 * cond(var) false, which would violate the guarantee.
	 *
2005 2006 2007 2008
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
L
Linus Torvalds 已提交
2009
	 */
2010
retry:
2011
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2012
	if (unlikely(ret != 0))
2013
		return ret;
2014 2015 2016 2017

retry_private:
	*hb = queue_lock(q);

2018
	ret = get_futex_value_locked(&uval, uaddr);
L
Linus Torvalds 已提交
2019

2020
	if (ret) {
J
Jason Low 已提交
2021
		queue_unlock(*hb);
L
Linus Torvalds 已提交
2022

2023
		ret = get_user(uval, uaddr);
D
Darren Hart 已提交
2024
		if (ret)
2025
			goto out;
L
Linus Torvalds 已提交
2026

2027
		if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
2028 2029
			goto retry_private;

2030
		put_futex_key(&q->key);
D
Darren Hart 已提交
2031
		goto retry;
L
Linus Torvalds 已提交
2032
	}
2033

2034
	if (uval != val) {
J
Jason Low 已提交
2035
		queue_unlock(*hb);
2036
		ret = -EWOULDBLOCK;
P
Peter Zijlstra 已提交
2037
	}
L
Linus Torvalds 已提交
2038

2039 2040
out:
	if (ret)
2041
		put_futex_key(&q->key);
2042 2043 2044
	return ret;
}

2045 2046
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
2047 2048 2049 2050
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
2051
	struct futex_q q = futex_q_init;
2052 2053 2054 2055 2056 2057 2058 2059 2060
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

2061 2062 2063
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
2064 2065 2066 2067 2068
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

T
Thomas Gleixner 已提交
2069
retry:
2070 2071 2072 2073
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
2074
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2075 2076 2077
	if (ret)
		goto out;

2078
	/* queue_me and wait for wakeup, timeout, or a signal. */
T
Thomas Gleixner 已提交
2079
	futex_wait_queue_me(hb, &q, to);
L
Linus Torvalds 已提交
2080 2081

	/* If we were woken (and unqueued), we succeeded, whatever. */
P
Peter Zijlstra 已提交
2082
	ret = 0;
2083
	/* unqueue_me() drops q.key ref */
L
Linus Torvalds 已提交
2084
	if (!unqueue_me(&q))
2085
		goto out;
P
Peter Zijlstra 已提交
2086
	ret = -ETIMEDOUT;
2087
	if (to && !to->task)
2088
		goto out;
N
Nick Piggin 已提交
2089

2090
	/*
T
Thomas Gleixner 已提交
2091 2092
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
2093
	 */
2094
	if (!signal_pending(current))
T
Thomas Gleixner 已提交
2095 2096
		goto retry;

P
Peter Zijlstra 已提交
2097
	ret = -ERESTARTSYS;
2098
	if (!abs_time)
2099
		goto out;
L
Linus Torvalds 已提交
2100

P
Peter Zijlstra 已提交
2101 2102
	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
2103
	restart->futex.uaddr = uaddr;
P
Peter Zijlstra 已提交
2104 2105 2106
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
2107
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2108

P
Peter Zijlstra 已提交
2109 2110
	ret = -ERESTART_RESTARTBLOCK;

2111
out:
2112 2113 2114 2115
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
2116 2117 2118
	return ret;
}

N
Nick Piggin 已提交
2119 2120 2121

static long futex_wait_restart(struct restart_block *restart)
{
2122
	u32 __user *uaddr = restart->futex.uaddr;
2123
	ktime_t t, *tp = NULL;
N
Nick Piggin 已提交
2124

2125 2126 2127 2128
	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
N
Nick Piggin 已提交
2129
	restart->fn = do_no_restart_syscall;
2130 2131 2132

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
N
Nick Piggin 已提交
2133 2134 2135
}


2136 2137 2138 2139 2140 2141
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
2142 2143
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
			 ktime_t *time, int trylock)
2144
{
2145
	struct hrtimer_sleeper timeout, *to = NULL;
2146
	struct futex_hash_bucket *hb;
2147
	struct futex_q q = futex_q_init;
2148
	int res, ret;
2149 2150 2151 2152

	if (refill_pi_state_cache())
		return -ENOMEM;

2153
	if (time) {
2154
		to = &timeout;
2155 2156
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
2157
		hrtimer_init_sleeper(to, current);
2158
		hrtimer_set_expires(&to->timer, *time);
2159 2160
	}

2161
retry:
2162
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2163
	if (unlikely(ret != 0))
2164
		goto out;
2165

D
Darren Hart 已提交
2166
retry_private:
E
Eric Sesterhenn 已提交
2167
	hb = queue_lock(&q);
2168

2169
	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2170
	if (unlikely(ret)) {
2171
		switch (ret) {
2172 2173 2174 2175 2176 2177
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
2178 2179 2180 2181 2182
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
J
Jason Low 已提交
2183
			queue_unlock(hb);
2184
			put_futex_key(&q.key);
2185 2186 2187
			cond_resched();
			goto retry;
		default:
2188
			goto out_unlock_put_key;
2189 2190 2191 2192 2193 2194
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
E
Eric Sesterhenn 已提交
2195
	queue_me(&q, hb);
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

2209
	spin_lock(q.lock_ptr);
2210 2211 2212 2213
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
2214
	res = fixup_owner(uaddr, &q, !ret);
2215 2216 2217 2218 2219 2220
	/*
	 * If fixup_owner() returned an error, proprogate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;
2221

2222
	/*
2223 2224
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
2225 2226 2227 2228
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

2229 2230
	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);
2231

2232
	goto out_put_key;
2233

2234
out_unlock_put_key:
J
Jason Low 已提交
2235
	queue_unlock(hb);
2236

2237
out_put_key:
2238
	put_futex_key(&q.key);
2239
out:
2240 2241
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
2242
	return ret != -EINTR ? ret : -ERESTARTNOINTR;
2243

2244
uaddr_faulted:
J
Jason Low 已提交
2245
	queue_unlock(hb);
2246

2247
	ret = fault_in_user_writeable(uaddr);
D
Darren Hart 已提交
2248 2249
	if (ret)
		goto out_put_key;
2250

2251
	if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
2252 2253
		goto retry_private;

2254
	put_futex_key(&q.key);
D
Darren Hart 已提交
2255
	goto retry;
2256 2257 2258 2259 2260 2261 2262
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
2263
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2264 2265 2266
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
2267
	union futex_key key = FUTEX_KEY_INIT;
2268
	u32 uval, vpid = task_pid_vnr(current);
D
Darren Hart 已提交
2269
	int ret;
2270 2271 2272 2273 2274 2275 2276

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
2277
	if ((uval & FUTEX_TID_MASK) != vpid)
2278 2279
		return -EPERM;

2280
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
2292 2293
	if (!(uval & FUTEX_OWNER_DIED) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2294 2295 2296 2297 2298
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
2299
	if (unlikely(uval == vpid))
2300 2301 2302 2303 2304 2305
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
J
Jason Low 已提交
2306
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321
		if (!match_futex (&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
2322 2323 2324 2325 2326
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}
2327 2328 2329

out_unlock:
	spin_unlock(&hb->lock);
2330
	put_futex_key(&key);
2331

2332
out:
2333 2334 2335
	return ret;

pi_faulted:
2336
	spin_unlock(&hb->lock);
2337
	put_futex_key(&key);
2338

2339
	ret = fault_in_user_writeable(uaddr);
2340
	if (!ret)
2341 2342
		goto retry;

L
Linus Torvalds 已提交
2343 2344 2345
	return ret;
}

2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was original enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
2358 2359 2360
 * Return:
 *  0 = no early wakeup detected;
 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
2382
		plist_del(&q->list, &hb->chain);
2383
		hb_waiters_dec(hb);
2384

T
Thomas Gleixner 已提交
2385
		/* Handle spurious wakeups gracefully */
2386
		ret = -EWOULDBLOCK;
2387 2388
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
T
Thomas Gleixner 已提交
2389
		else if (signal_pending(current))
2390
			ret = -ERESTARTNOINTR;
2391 2392 2393 2394 2395 2396
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2397
 * @uaddr:	the futex we initially wait on (non-pi)
2398
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2399 2400 2401
 * 		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
2402
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
2403 2404 2405
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2406 2407 2408 2409 2410
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
2411 2412
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2413
 * via the following--
2414
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2415 2416 2417
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
2418
 *
2419
 * If 3, cleanup and return -ERESTARTNOINTR.
2420 2421 2422 2423 2424 2425 2426
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
2427
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2428 2429 2430
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
2431 2432
 * Return:
 *  0 - On success;
2433 2434
 * <0 - On error
 */
2435
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2436
				 u32 val, ktime_t *abs_time, u32 bitset,
2437
				 u32 __user *uaddr2)
2438 2439 2440 2441 2442
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
2443 2444
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
2445 2446
	int res, ret;

2447 2448 2449
	if (uaddr == uaddr2)
		return -EINVAL;

2450 2451 2452 2453 2454
	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
2455 2456 2457
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
2468 2469
	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
	RB_CLEAR_NODE(&rt_waiter.tree_entry);
2470 2471
	rt_waiter.task = NULL;

2472
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2473 2474 2475
	if (unlikely(ret != 0))
		goto out;

2476 2477 2478 2479
	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

2480 2481 2482 2483
	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
2484
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
T
Thomas Gleixner 已提交
2485 2486
	if (ret)
		goto out_key2;
2487 2488

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
T
Thomas Gleixner 已提交
2489
	futex_wait_queue_me(hb, &q, to);
2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
2501 2502 2503
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
2504 2505 2506 2507 2508 2509 2510 2511 2512 2513
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
2514
			ret = fixup_pi_state_owner(uaddr2, &q, current);
2515 2516 2517 2518 2519 2520 2521 2522
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
2523
		WARN_ON(!q.pi_state);
2524 2525 2526 2527 2528 2529 2530 2531 2532
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
2533
		res = fixup_owner(uaddr2, &q, !ret);
2534 2535
		/*
		 * If fixup_owner() returned an error, proprogate that.  If it
2536
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
2550
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2551 2552 2553
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
2554 2555 2556 2557 2558
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
2559
		 */
2560
		ret = -EWOULDBLOCK;
2561 2562 2563
	}

out_put_keys:
2564
	put_futex_key(&q.key);
T
Thomas Gleixner 已提交
2565
out_key2:
2566
	put_futex_key(&key2);
2567 2568 2569 2570 2571 2572 2573 2574 2575

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

2576 2577 2578 2579 2580 2581 2582
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
2583
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2584 2585 2586 2587 2588 2589 2590 2591
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
2592 2593 2594
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
2595
 */
2596 2597
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
2598
{
2599 2600
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
2613 2614 2615 2616
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
2617
 */
2618 2619 2620
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
2621
{
A
Al Viro 已提交
2622
	struct robust_list_head __user *head;
2623
	unsigned long ret;
2624
	struct task_struct *p;
2625

2626 2627 2628
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

2629 2630 2631
	rcu_read_lock();

	ret = -ESRCH;
2632
	if (!pid)
2633
		p = current;
2634
	else {
2635
		p = find_task_by_vpid(pid);
2636 2637 2638 2639
		if (!p)
			goto err_unlock;
	}

2640 2641 2642 2643 2644 2645 2646
	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

2647 2648 2649 2650 2651
	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
2652
	rcu_read_unlock();
2653 2654 2655 2656 2657 2658 2659 2660

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
2661
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2662
{
2663
	u32 uval, uninitialized_var(nval), mval;
2664

2665 2666
retry:
	if (get_user(uval, uaddr))
2667 2668
		return -1;

2669
	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2670 2671 2672 2673 2674 2675 2676 2677 2678 2679
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
2680
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
2695
		if (nval != uval)
2696
			goto retry;
2697

2698 2699 2700 2701
		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
T
Thomas Gleixner 已提交
2702
		if (!pi && (uval & FUTEX_WAITERS))
P
Peter Zijlstra 已提交
2703
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2704 2705 2706 2707
	}
	return 0;
}

2708 2709 2710 2711
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
A
Al Viro 已提交
2712
				     struct robust_list __user * __user *head,
2713
				     unsigned int *pi)
2714 2715 2716
{
	unsigned long uentry;

A
Al Viro 已提交
2717
	if (get_user(uentry, (unsigned long __user *)head))
2718 2719
		return -EFAULT;

A
Al Viro 已提交
2720
	*entry = (void __user *)(uentry & ~1UL);
2721 2722 2723 2724 2725
	*pi = uentry & 1;

	return 0;
}

2726 2727 2728 2729 2730 2731 2732 2733 2734
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
M
Martin Schwidefsky 已提交
2735
	struct robust_list __user *entry, *next_entry, *pending;
2736 2737
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
2738
	unsigned long futex_offset;
M
Martin Schwidefsky 已提交
2739
	int rc;
2740

2741 2742 2743
	if (!futex_cmpxchg_enabled)
		return;

2744 2745 2746 2747
	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
2748
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
2749 2750 2751 2752 2753 2754 2755 2756 2757 2758
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
2759
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2760
		return;
2761

M
Martin Schwidefsky 已提交
2762
	next_entry = NULL;	/* avoid warning with gcc */
2763
	while (entry != &head->list) {
M
Martin Schwidefsky 已提交
2764 2765 2766 2767 2768
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2769 2770
		/*
		 * A pending lock might already be on the list, so
2771
		 * don't process it twice:
2772 2773
		 */
		if (entry != pending)
A
Al Viro 已提交
2774
			if (handle_futex_death((void __user *)entry + futex_offset,
2775
						curr, pi))
2776
				return;
M
Martin Schwidefsky 已提交
2777
		if (rc)
2778
			return;
M
Martin Schwidefsky 已提交
2779 2780
		entry = next_entry;
		pi = next_pi;
2781 2782 2783 2784 2785 2786 2787 2788
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
M
Martin Schwidefsky 已提交
2789 2790 2791 2792

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
2793 2794
}

2795
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2796
		u32 __user *uaddr2, u32 val2, u32 val3)
L
Linus Torvalds 已提交
2797
{
T
Thomas Gleixner 已提交
2798
	int cmd = op & FUTEX_CMD_MASK;
2799
	unsigned int flags = 0;
E
Eric Dumazet 已提交
2800 2801

	if (!(op & FUTEX_PRIVATE_FLAG))
2802
		flags |= FLAGS_SHARED;
L
Linus Torvalds 已提交
2803

2804 2805 2806 2807 2808
	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}
L
Linus Torvalds 已提交
2809

2810 2811 2812 2813 2814 2815 2816 2817 2818 2819
	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

E
Eric Dumazet 已提交
2820
	switch (cmd) {
L
Linus Torvalds 已提交
2821
	case FUTEX_WAIT:
2822 2823
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
T
Thomas Gleixner 已提交
2824
		return futex_wait(uaddr, flags, val, timeout, val3);
L
Linus Torvalds 已提交
2825
	case FUTEX_WAKE:
2826 2827
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
T
Thomas Gleixner 已提交
2828
		return futex_wake(uaddr, flags, val, val3);
L
Linus Torvalds 已提交
2829
	case FUTEX_REQUEUE:
T
Thomas Gleixner 已提交
2830
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
L
Linus Torvalds 已提交
2831
	case FUTEX_CMP_REQUEUE:
T
Thomas Gleixner 已提交
2832
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2833
	case FUTEX_WAKE_OP:
T
Thomas Gleixner 已提交
2834
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2835
	case FUTEX_LOCK_PI:
T
Thomas Gleixner 已提交
2836
		return futex_lock_pi(uaddr, flags, val, timeout, 0);
2837
	case FUTEX_UNLOCK_PI:
T
Thomas Gleixner 已提交
2838
		return futex_unlock_pi(uaddr, flags);
2839
	case FUTEX_TRYLOCK_PI:
T
Thomas Gleixner 已提交
2840
		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2841 2842
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
T
Thomas Gleixner 已提交
2843 2844
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
2845
	case FUTEX_CMP_REQUEUE_PI:
T
Thomas Gleixner 已提交
2846
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
L
Linus Torvalds 已提交
2847
	}
T
Thomas Gleixner 已提交
2848
	return -ENOSYS;
L
Linus Torvalds 已提交
2849 2850 2851
}


2852 2853 2854
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
L
Linus Torvalds 已提交
2855
{
2856 2857
	struct timespec ts;
	ktime_t t, *tp = NULL;
2858
	u32 val2 = 0;
E
Eric Dumazet 已提交
2859
	int cmd = op & FUTEX_CMD_MASK;
L
Linus Torvalds 已提交
2860

2861
	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2862 2863
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
2864
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
L
Linus Torvalds 已提交
2865
			return -EFAULT;
2866
		if (!timespec_valid(&ts))
2867
			return -EINVAL;
2868 2869

		t = timespec_to_ktime(ts);
E
Eric Dumazet 已提交
2870
		if (cmd == FUTEX_WAIT)
2871
			t = ktime_add_safe(ktime_get(), t);
2872
		tp = &t;
L
Linus Torvalds 已提交
2873 2874
	}
	/*
2875
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2876
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
L
Linus Torvalds 已提交
2877
	 */
2878
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2879
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2880
		val2 = (u32) (unsigned long) utime;
L
Linus Torvalds 已提交
2881

2882
	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
L
Linus Torvalds 已提交
2883 2884
}

2885
static void __init futex_detect_cmpxchg(void)
L
Linus Torvalds 已提交
2886
{
2887
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
2888
	u32 curval;
2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
2907
	unsigned int futex_shift;
2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
2919 2920 2921
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;
2922 2923

	futex_detect_cmpxchg();
2924

2925
	for (i = 0; i < futex_hashsize; i++) {
2926
		atomic_set(&futex_queues[i].waiters, 0);
2927
		plist_head_init(&futex_queues[i].chain);
T
Thomas Gleixner 已提交
2928 2929 2930
		spin_lock_init(&futex_queues[i].lock);
	}

L
Linus Torvalds 已提交
2931 2932
	return 0;
}
2933
__initcall(futex_init);