/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and we can simply return. In order for this
 * optimization to work, ordering guarantees must exist so that a waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued to another
 * address, we always increment the waiters count for the destination bucket
 * before acquiring the lock, and decrement it again after releasing it -
 * the code that actually moves the futex(es) between hash buckets
 * (requeue_futex) does the additional required waiter count housekeeping.
 * This is done in double_lock_hb() and double_unlock_hb(), respectively.
 */

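/*
 * For reference, a minimal sketch of the user space side that the wait/wake
 * protocol above assumes (illustrative only, error handling omitted; see the
 * futex(2) man page for the full calling convention):
 *
 *	while (!try_acquire(&futex_val))
 *		syscall(SYS_futex, &futex_val, FUTEX_WAIT, observed_val,
 *			NULL, NULL, 0);
 *
 * and on the waker side, after changing the value:
 *
 *	syscall(SYS_futex, &futex_val, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * try_acquire() and observed_val stand in for whatever atomic protocol the
 * user space library implements on top of the futex word.
 */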
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

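/*
 * A futex_q is normally allocated on the waiter's kernel stack and copied
 * from futex_q_init before being enqueued (see futex_wait()), so the wait
 * path involves no dynamic allocation.
 */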
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

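/*
 * On !SMP the waiter accounting in hb_waiters_inc()/hb_waiters_dec() is
 * compiled out, so hb_waiters_pending() conservatively reports a waiter;
 * callers then always take hb->lock instead of relying on the lockless
 * check.
 */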
static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}

/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

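/*
 * Atomic accessors for the user space futex word, for use with the hb lock
 * held: page faults are disabled around the access, so instead of sleeping
 * on a missing page the operation fails (typically with -EFAULT) and the
 * caller must drop its locks and fault the page in explicitly, e.g. via
 * fault_in_user_writeable() or get_user().
 */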
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case, drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!atomic_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
P
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!atomic_read(&pi_state->refcount));
P
	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;

1169 1170 1171 1172 1173 1174
/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
1175 1176
{
	pid_t pid = uval & FUTEX_TID_MASK;
1177 1178
	struct futex_pi_state *pi_state;
	struct task_struct *p;
1179

1180
	/*
1181
	 * We are the first waiter - try to look up the real owner and attach
1182
	 * the new pi_state to it, but bail out when TID = 0 [1]
1183
	 */
1184
	if (!pid)
1185
		return -ESRCH;
1186
	p = futex_find_get_task(pid);
1187 1188
	if (!p)
		return -ESRCH;
1189

1190
	if (unlikely(p->flags & PF_KTHREAD)) {
1191 1192 1193 1194
		put_task_struct(p);
		return -EPERM;
	}

1195 1196 1197 1198 1199 1200
	/*
	 * We need to look at the task state flags to figure out,
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
1201
	raw_spin_lock_irq(&p->pi_lock);
1202 1203 1204 1205 1206 1207 1208 1209
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

1210
		raw_spin_unlock_irq(&p->pi_lock);
1211 1212 1213
		put_task_struct(p);
		return ret;
	}
1214

1215 1216
	/*
	 * No existing pi state. First waiter. [2]
P
Peter Zijlstra 已提交
1217 1218 1219
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
1220
	 */
1221 1222 1223
	pi_state = alloc_pi_state();

	/*
1224
	 * Initialize the pi_mutex in locked state and make @p
1225 1226 1227 1228 1229
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
P
Pierre Peiffer 已提交
1230
	pi_state->key = *key;
1231

1232
	WARN_ON(!list_empty(&pi_state->list));
1233
	list_add(&pi_state->list, &p->pi_state_list);
1234 1235 1236 1237
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
1238
	pi_state->owner = p;
1239
	raw_spin_unlock_irq(&p->pi_lock);
1240 1241 1242

	put_task_struct(p);

P
Pierre Peiffer 已提交
1243
	*ps = pi_state;
1244 1245 1246 1247

	return 0;
}

P
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup, after we've released
	 * the hb->lock. wake_q_add() grabs a reference to p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);
}

/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;

	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}

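/*
 * Taking both bucket locks in a fixed (ascending address) order means two
 * tasks concurrently double-locking the same pair of buckets cannot
 * deadlock on each other.
 */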
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

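/*
 * Decode and execute a FUTEX_WAKE_OP operation on @uaddr. The encoded_op
 * layout matches the FUTEX_OP() macro in include/uapi/linux/futex.h:
 * bits 28-31 hold the opcode (with bit 31 doubling as the
 * FUTEX_OP_OPARG_SHIFT modifier, handled separately below), bits 24-27
 * the comparison op, bits 12-23 the signed operation argument and bits
 * 0-11 the signed comparison argument.
 */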
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			get_pi_state(pi_state);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiters futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}
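
/*
 * For reference, the user space shape this serves (a sketch, not kernel
 * code): a broadcast on a condvar wakes one waiter and requeues the rest
 * onto the mutex futex instead of waking them all, avoiding a thundering
 * herd on the mutex. val3 carries the expected condvar value (cmpval):
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long cond_broadcast(int *cond, int *mutex, int cond_val)
 *	{
 *		// Wake 1 waiter, requeue up to INT_MAX waiters from cond
 *		// to mutex; fails with -EAGAIN if *cond != cond_val.
 *		// nr_requeue travels in the timeout slot of the syscall.
 *		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
 *			       INT_MAX, mutex, cond_val);
 *	}
 */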

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
{
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, uninitialized_var(curval), newval;
	struct task_struct *oldowner, *newowner;
	u32 newtid;
	int ret;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
	 */
retry:
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock; there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	return 0;

	/*
	 * To handle the page fault we need to drop the locks here. That gives
	 * the other task (either the highest priority waiter itself or the
	 * task which stole the rtmutex) the chance to try the fixup of the
	 * pi_state. So once we are back from handling the fault we need to
	 * check the pi_state after reacquiring the locks and before trying to
	 * do another fixup. When the fixup has been done already we simply
	 * return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
	 */
handle_fault:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}

	if (ret)
		goto out_unlock;

	goto retry;

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  -  1 - success, lock taken;
 *  -  0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
	}

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  -  0 - uaddr contains val and hb has been locked;
 *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = *abs_time;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
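
/*
 * For reference, the canonical user space wait loop served by
 * futex_wait() (a sketch, not kernel code). The revalidation of *uaddr
 * in futex_wait_setup() is what makes "check the value, then sleep"
 * race free: a waker that changes the value and calls FUTEX_WAKE after
 * our check cannot be missed, the syscall simply returns -EWOULDBLOCK:
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void wait_while_equal(atomic_int *futex_word, int val)
 *	{
 *		while (atomic_load(futex_word) == val) {
 *			// Sleeps only if *futex_word still equals val.
 *			syscall(SYS_futex, futex_word, FUTEX_WAIT, val,
 *				NULL, NULL, 0);
 *		}
 *	}
 *
 *	// The waker side: change the value first, then wake.
 *	static void wake_all(atomic_int *futex_word)
 *	{
 *		syscall(SYS_futex, futex_word, FUTEX_WAKE, INT_MAX,
 *			NULL, NULL, 0);
 *	}
 */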

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex trylock_pi() implementation, with the
 * corresponding semantics.
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Task is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
	 * serializes against futex_unlock_pi() as that does the exact same
	 * lock handoff sequence.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

	if (ret) {
		if (ret == 1)
			ret = 0;

		spin_lock(q.lock_ptr);
		goto no_block;
	}


	if (unlikely(to))
		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

	spin_lock(q.lock_ptr);
	/*
	 * If we failed to acquire the lock (signal/timeout), we must
	 * first acquire the hb->lock before removing the lock from the
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
	 * wait lists consistent.
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}
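
/*
 * For reference, the user space half of the PI protocol whose slow paths
 * futex_lock_pi() and futex_unlock_pi() implement (a sketch, not kernel
 * code; this is the shape used by PTHREAD_PRIO_INHERIT mutexes).
 * Uncontended lock and unlock are a single cmpxchg; the kernel is
 * entered only when the 0 <-> TID transition fails:
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void pi_lock(atomic_uint *f, unsigned int tid)
 *	{
 *		unsigned int zero = 0;
 *		// Fast path: 0 -> TID. On failure the kernel blocks us on
 *		// the rt_mutex and priority-boosts the owner as needed.
 *		if (!atomic_compare_exchange_strong(f, &zero, tid))
 *			syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL,
 *				NULL, 0);
 *	}
 *
 *	static void pi_unlock(atomic_uint *f, unsigned int tid)
 *	{
 *		unsigned int old = tid;
 *		// Fast path: TID -> 0 succeeds only while no waiter has
 *		// set FUTEX_WAITERS; otherwise the kernel hands the lock
 *		// to the top waiter.
 *		if (!atomic_compare_exchange_strong(f, &old, 0))
 *			syscall(SYS_futex, f, FUTEX_UNLOCK_PI, 0, NULL,
 *				NULL, 0);
 *	}
 */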

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;
	int ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

		get_pi_state(pi_state);
		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN) {
			put_futex_key(&key);
			goto retry;
		}
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_putkey;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
		spin_unlock(&hb->lock);
		goto pi_faulted;
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_faulted:
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  -  0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  -  0 - On success;
 *  - <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
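
/*
 * For reference, how the two requeue-PI operations pair up in user space
 * (a sketch, not kernel code; glibc's PI-aware condvars used this shape).
 * The waiter parks on the non-PI condvar futex and is moved onto the PI
 * mutex futex by the waker, so ownership of the rt_mutex is always
 * well defined:
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long cond_wait_pi(int *cond, int *pi_mutex, int cond_val)
 *	{
 *		// Block on cond; by the time this returns 0 the kernel
 *		// has acquired pi_mutex on our behalf.
 *		return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI,
 *			       cond_val, NULL, pi_mutex, 0);
 *	}
 *
 *	static long cond_broadcast_pi(int *cond, int *pi_mutex, int cond_val)
 *	{
 *		// nr_wake must be 1; the remaining waiters are requeued
 *		// onto the PI futex (nr_requeue in the timeout slot).
 *		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
 *			       (unsigned long)INT_MAX, pi_mutex, cond_val);
 *	}
 */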

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
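
/*
 * For reference, how a threading library is expected to register the
 * list walked below (a sketch, not kernel code). Every robust lock
 * record embeds a struct robust_list node; futex_offset is the offset
 * from that node to the futex word itself:
 *
 *	#include <linux/futex.h>
 *	#include <stddef.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct robust_mutex {
 *		struct robust_list list;
 *		int futex;
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },  // empty: self
 *		.futex_offset	 = offsetof(struct robust_mutex, futex),
 *		.list_op_pending = NULL,
 *	};
 *
 *	static int register_robust_list(void)
 *	{
 *		// Once per thread; exit_robust_list() walks it on death.
 *		return syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 */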

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
3381
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3382
{
3383
	u32 uval, uninitialized_var(nval), mval;
3384

3385 3386
retry:
	if (get_user(uval, uaddr))
3387 3388
		return -1;

3389
	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3390 3391 3392 3393 3394 3395 3396 3397 3398 3399
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
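
/*
 * Userspace counterpart to the OWNER_DIED handling above - an
 * illustrative sketch only, with hypothetical helper names; it shows
 * one possible convention for how a later acquirer observes the
 * FUTEX_OWNER_DIED bit that handle_futex_death() set:
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *
 *	// Returns 1 on a clean acquire, -1 when the lock was inherited
 *	// from a dead owner (caller must recover the protected state),
 *	// 0 when the lock is still held by a live owner.
 *	static int robust_trylock(_Atomic unsigned int *futex,
 *				  unsigned int tid)
 *	{
 *		unsigned int old = 0;
 *
 *		if (atomic_compare_exchange_strong(futex, &old, tid))
 *			return 1;
 *		if (old & FUTEX_OWNER_DIED) {
 *			// take over: keep FUTEX_WAITERS, drop OWNER_DIED
 *			unsigned int new = tid | (old & FUTEX_WAITERS);
 *
 *			if (atomic_compare_exchange_strong(futex, &old, new))
 *				return -1;
 *		}
 *		return 0;
 *	}
 */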

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
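		/* fall through */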
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
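		/* fall through */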
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
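
/*
 * Minimal userspace sketch of driving the syscall above (illustrative
 * only, with a hypothetical flag variable; note the conversion above
 * makes FUTEX_WAIT timeouts relative and FUTEX_WAIT_BITSET timeouts
 * absolute):
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static _Atomic unsigned int flag;
 *
 *	static void wait_for_flag(void)
 *	{
 *		while (atomic_load(&flag) == 0)
 *			// sleeps only if flag is still 0, closing the
 *			// check-then-sleep race under the hash bucket lock
 *			syscall(SYS_futex, &flag, FUTEX_WAIT_PRIVATE,
 *				0, NULL, NULL, 0);
 *	}
 *
 *	static void set_flag_and_wake(void)
 *	{
 *		atomic_store(&flag, 1);
 *		syscall(SYS_futex, &flag, FUTEX_WAKE_PRIVATE,
 *			1, NULL, NULL, 0);	// wake one waiter
 *	}
 */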

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);