/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. For this
 * optimization to work, ordering guarantees must exist so that the waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even though the wait call
 * may return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock and decrement them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
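
/*
 * Illustration only -- not part of this file's logic, kept under
 * "#if 0" so it is never built: a minimal user space mutex sketch on
 * top of the FUTEX_WAIT/FUTEX_WAKE protocol documented above.  The
 * names futex_word, example_lock and example_unlock are invented for
 * the sketch, and error handling is elided.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_int futex_word;	/* 0 == unlocked, 1 == locked */

static void example_lock(void)
{
	int expected = 0;

	while (!atomic_compare_exchange_strong(&futex_word, &expected, 1)) {
		/*
		 * futex_wait() re-reads the word under the hash bucket
		 * lock and only sleeps while it still reads 1, closing
		 * the lost-wakeup race shown above.
		 */
		syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 1,
			NULL, NULL, 0);
		expected = 0;
	}
}

static void example_unlock(void)
{
	atomic_store(&futex_word, 0);
	/* futex_wake() returns early when hb_waiters_pending() is 0. */
	syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, 1,
		NULL, NULL, 0);
}
#endif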

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);
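
/*
 * Note: setup_fault_attr() takes the generic fault-attr boot syntax,
 * i.e. "fail_futex=<interval>,<probability>,<space>,<times>" on the
 * kernel command line (see Documentation/fault-injection/).  The
 * "ignore-private" switch below is additionally exposed through
 * debugfs when CONFIG_FAULT_INJECTION_DEBUG_FS is enabled.
 */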

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}


/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU-less systems futexes are always "private" as there is no per
	 * process address space. We need the smp_mb() nevertheless - yes,
	 * arch/blackfin has MMU-less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel so warn for now if this happens.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but lets be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from futex_q's of other futexes)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 *
 * Must be called with the hb lock held.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				return -EINVAL;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_state;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_state;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			return -EINVAL;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		return -EINVAL;
out_state:
	atomic_inc(&pi_state->refcount);
	*ps = pi_state;
	return 0;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *match = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *match;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	match = futex_top_waiter(hb, key);
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set, or we were called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
			 struct futex_hash_bucket *hb)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We cleanup the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;
	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
	if (ret) {
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	/*
	 * First unlock HB so the waiter does not spin on it once it is woken
	 * up. Second, wake up the waiter before the priority is adjusted. If we
	 * deboost first (and lose our higher priority), then the task might get
	 * scheduled away before the wake up can take place.
	 */
	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
L
Linus Torvalds 已提交
1710
	struct futex_q *this, *next;
1711
	WAKE_Q(wake_q);
1712 1713

	if (requeue_pi) {
1714 1715 1716 1717 1718 1719 1720
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}
L
Linus Torvalds 已提交
1740

1741
retry:
1742
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
L
Linus Torvalds 已提交
1743 1744
	if (unlikely(ret != 0))
		goto out;
1745 1746
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
L
Linus Torvalds 已提交
1747
	if (unlikely(ret != 0))
1748
		goto out_put_key1;
L
Linus Torvalds 已提交
1749

1750 1751 1752 1753 1754 1755 1756 1757 1758
	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

1759 1760
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
L
Linus Torvalds 已提交
1761

D
Darren Hart 已提交
1762
retry_private:
1763
	hb_waiters_inc(hb2);
I
Ingo Molnar 已提交
1764
	double_lock_hb(hb1, hb2);
L
Linus Torvalds 已提交
1765

1766 1767
	if (likely(cmpval != NULL)) {
		u32 curval;
L
Linus Torvalds 已提交
1768

1769
		ret = get_futex_value_locked(&curval, uaddr1);
L
Linus Torvalds 已提交
1770 1771

		if (unlikely(ret)) {
D
Darren Hart 已提交
1772
			double_unlock_hb(hb1, hb2);
1773
			hb_waiters_dec(hb2);
L
Linus Torvalds 已提交
1774

1775
			ret = get_user(curval, uaddr1);
D
Darren Hart 已提交
1776 1777
			if (ret)
				goto out_put_keys;
L
Linus Torvalds 已提交
1778

1779
			if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
1780
				goto retry_private;
L
Linus Torvalds 已提交
1781

1782 1783
			put_futex_key(&key2);
			put_futex_key(&key1);
D
Darren Hart 已提交
1784
			goto retry;
L
Linus Torvalds 已提交
1785
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiter's futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}
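
/*
 * A sketch of the waker side this services (illustrative only, not part
 * of this file): a glibc-style pthread_cond_broadcast() wakes one waiter
 * and requeues the rest onto the mutex futex, avoiding a thundering herd.
 * The "cond" and "lock" structures below are hypothetical names.
 *
 *	unsigned int seq = atomic_fetch_add(&cond->seq, 1) + 1;
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PRIVATE,
 *		1,			// nr_wake: wake a single waiter
 *		(void *)(long)INT_MAX,	// nr_requeue: requeue the rest
 *		&lock->word,		// uaddr2: the mutex futex
 *		seq);			// cmpval: snapshot of *uaddr1
 *
 * If *uaddr1 no longer equals cmpval, the cmpval check above fails with
 * -EAGAIN and userspace retries.
 */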

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}
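
/*
 * Worked example for the prio clamp above (illustrative values): an RT
 * waiter with normal_prio 10 is queued at plist priority 10, while every
 * non-RT waiter (normal_prio >= MAX_RT_PRIO, typically 100) is clamped to
 * MAX_RT_PRIO and therefore sorts behind all RT waiters, FIFO among
 * themselves.
 */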

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *   1 - if the futex_q was still queued (and we unqueued it);
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  1 - success, lock taken;
 *  0 - success, lock not taken;
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late.
		 */
		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
		ret = fixup_pi_state_owner(uaddr, q, owner);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  0 - uaddr contains val and hb has been locked;
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
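
/*
 * The userspace pattern futex_wait() pairs with, shown as a sketch only
 * (the "futex_word" variable is hypothetical):
 *
 *	// Waiter: sleep only while the word still holds the old value.
 *	while (atomic_load(&futex_word) == 0)
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *			0, NULL, NULL, 0);	// blocks iff *uaddr == 0
 *
 *	// Waker: publish the new value first, then wake.
 *	atomic_store(&futex_word, 1);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 *
 * The -EWOULDBLOCK return from futex_wait_setup() is what surfaces when
 * the waiter's value snapshot is already stale at syscall time.
 */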


static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex trylock_pi() implementation, with the
 * corresponding semantics.
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Task is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock) {
		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
	} else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *match;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	match = futex_top_waiter(hb, &key);
	if (match) {
		ret = wake_futex_pi(uaddr, uval, match, hb);
		/*
		 * In case of success wake_futex_pi dropped the hash
		 * bucket lock.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN) {
			spin_unlock(&hb->lock);
			put_futex_key(&key);
			goto retry;
		}
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_unlock;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
		goto pi_faulted;

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}
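
/*
 * Userspace side of the PI protocol, as a hedged sketch (the "lock" word
 * is hypothetical): the uncontended paths never enter the kernel.
 *
 *	// lock: the 0 -> TID transition that futex_lock_pi() backs up.
 *	u32 zero = 0;
 *	if (!atomic_compare_exchange_strong(&lock, &zero, gettid()))
 *		syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 *	// unlock: the TID -> 0 transition that futex_unlock_pi() backs up.
 *	u32 tid = gettid();
 *	if (!atomic_compare_exchange_strong(&lock, &tid, 0))
 *		syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *
 * The unlock fastpath fails once FUTEX_WAITERS is set in the value, which
 * is what routes contended unlocks through the slowpath above.
 */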

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  0 = no early wakeup detected;
 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  0 - On success;
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
	RB_CLEAR_NODE(&rt_waiter.tree_entry);
	rt_waiter.task = NULL;

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
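
/*
 * The waiter-side pairing for the requeue-PI machinery above, sketched
 * under assumed glibc-like naming (cond->seq and mutex->word are
 * hypothetical): the wait targets the condvar futex and names the PI
 * mutex futex as uaddr2, matching a FUTEX_CMP_REQUEUE_PI waker.
 *
 *	u32 seq = atomic_load(&cond->seq);
 *	pi_mutex_unlock(&mutex->word);		// hypothetical helper
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI_PRIVATE,
 *		seq, NULL, &mutex->word, 0);
 *	// On success the task returns owning mutex->word; on -ETIMEDOUT
 *	// or a signal the early-wakeup path above reports it instead.
 */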

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
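
/*
 * Worked example for the transition above: if the dying thread's TID is
 * 1234 and a waiter is queued, the futex word goes from
 * (1234 | FUTEX_WAITERS) to (FUTEX_WAITERS | FUTEX_OWNER_DIED). The
 * woken waiter then observes FUTEX_OWNER_DIED and can re-take the lock
 * knowing the state it protects may be inconsistent.
 */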

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
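
/*
 * The utime multiplexing above means a raw caller passes an integer where
 * a pointer is typed. Illustrative sketch only ("word" and "word2" are
 * hypothetical futex variables):
 *
 *	// FUTEX_WAIT: utime really is a struct timespec * (relative here,
 *	// converted to an absolute expiry by the ktime_add_safe() above).
 *	struct timespec ts = { .tv_sec = 1 };
 *	syscall(SYS_futex, &word, FUTEX_WAIT_PRIVATE, val, &ts, NULL, 0);
 *
 *	// FUTEX_CMP_REQUEUE: utime smuggles nr_requeue as a plain integer.
 *	syscall(SYS_futex, &word, FUTEX_CMP_REQUEUE_PRIVATE, 1,
 *		(void *)(long)INT_MAX, &word2, val);
 */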

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);