/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. For this
 * optimization to work, ordering guarantees must exist so that the waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even though the wait call
 * may later return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. The count is then decremented again after the lock is
 * released - the code that actually moves the futex(es) between hash buckets
 * (requeue_futex) will do the additional required waiter count housekeeping.
 * This is done for double_lock_hb() and double_unlock_hb(), respectively.
 */

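/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * mutex built on the wait/wake protocol described above. The futex()
 * wrapper and the 0 (unlocked) / 1 (locked) encoding of the lock word
 * are assumptions made for the example, not kernel API:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdatomic.h>
 *
 *	static long futex(atomic_int *uaddr, int op, int val)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *	}
 *
 *	static void lock(atomic_int *f)
 *	{
 *		int expected = 0;
 *		while (!atomic_compare_exchange_strong(f, &expected, 1)) {
 *			futex(f, FUTEX_WAIT, 1);	// sleep while *f == 1
 *			expected = 0;
 *		}
 *	}
 *
 *	static void unlock(atomic_int *f)
 *	{
 *		atomic_store(f, 0);
 *		futex(f, FUTEX_WAKE, 1);		// wake one waiter
 *	}
 *
 * FUTEX_WAIT only sleeps if *f still equals the val argument when the
 * kernel re-reads it under the hash bucket lock, which is what makes
 * the "value changed vs. go to sleep" decision race-free.
 */
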
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)

/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}

/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *              FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but lets be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

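/*
 * Illustrative userspace sketch (an assumption, not derived from this
 * file): two processes reach the same inode-based key computed above
 * by mapping the same file, even though their virtual addresses
 * differ. The file path and size are hypothetical:
 *
 *	int fd = open("/dev/shm/lock", O_RDWR);
 *	atomic_int *f = mmap(NULL, sizeof(atomic_int),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// FUTEX_WAIT/FUTEX_WAKE on *f now pair up across processes,
 *	// provided FUTEX_PRIVATE_FLAG is not used.
 *
 * A futex in private anonymous memory instead takes the fast
 * PROCESS_PRIVATE path above and is keyed on (current->mm, address).
 */
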
static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!atomic_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */
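
/*
 * Worked example for the table above (illustrative): an owner dies
 * with waiters pending, so exit_robust_list() leaves the user space
 * value at FUTEX_WAITERS | FUTEX_OWNER_DIED, i.e. uTID == 0 and
 * uODIED == 1. A waiter that then finds a pi_state with a NULL
 * pi_state->owner is in row [4]: the state is valid and the lock can
 * be taken over.
 */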

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If PF_EXITPIDONE is not yet set, then try again.
	 */
	if (tsk && !(tsk->flags & PF_EXITPIDONE))
		return -EAGAIN;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
	 *  tsk->flags |= PF_EXITPIDONE;     } else {
	 *				       if (!(tsk->flags & PF_EXITPIDONE))
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps);
}

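/*
 * Illustrative userspace sketch (an assumption, following the PI futex
 * protocol): only the contended case enters the kernel and reaches
 * futex_lock_pi_atomic() above. Variable names are made up for the
 * example:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	int expected = 0;
 *	if (!atomic_compare_exchange_strong(f, &expected, tid))
 *		syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * On unlock, the owner tries cmpxchg(f, tid, 0) and calls
 * FUTEX_UNLOCK_PI only if the kernel has set FUTEX_WAITERS in the
 * meantime.
 */
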
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add(wake_q, p);
	put_task_struct(p);
}

/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;

	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

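/*
 * Illustrative sketch (mirroring the bitset check above): user space
 * can address sub-groups of waiters on a single futex word with the
 * bitset variants, e.g.
 *
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	...
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET, 1, NULL, NULL, 0x1);
 *
 * A waiter is only eligible if (waiter->bitset & waker_bitset) != 0;
 * FUTEX_BITSET_MATCH_ANY (all bits set) restores plain WAIT/WAKE
 * behaviour, see futex_q_init above.
 */
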
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}

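/*
 * Illustrative sketch: user space packs @encoded_op with the
 * FUTEX_OP() macro from the uapi <linux/futex.h> header, which fills
 * exactly the bit fields decoded above. The uaddr1/uaddr2/nr_wake
 * names refer to futex_wake_op() below; the chosen operation is just
 * an example:
 *
 *	// *uaddr2 = 0; wake up to nr_wake2 waiters on uaddr2 if the
 *	// old value was != 0 (nr_wake2 travels in the timeout slot).
 *	unsigned int op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0);
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake,
 *		(void *)(long)nr_wake2, uaddr2, op);
 */
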
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}
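
/*
 * Reader's note on the expected caller (sketch, not defined here): a
 * condvar broadcast over a PI mutex maps onto futex_requeue() below as
 *	futex(cond, FUTEX_CMP_REQUEUE_PI, 1, INT_MAX, &mutex, cond_val)
 * i.e. nr_wake == 1 and nr_requeue == INT_MAX: wake the top waiter and
 * requeue everybody else onto the PI futex.
 */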

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			get_pi_state(pi_state);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiter's futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we removed it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
{
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, uninitialized_var(curval), newval;
	struct task_struct *oldowner, *newowner;
	u32 newtid;
	int ret;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
	 */
retry:
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock, there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	return 0;

	/*
	 * To handle the page fault we need to drop the locks here. That gives
	 * the other task (either the highest priority waiter itself or the
	 * task which stole the rtmutex) the chance to try the fixup of the
	 * pi_state. So once we are back from handling the fault we need to
	 * check the pi_state after reacquiring the locks and before trying to
	 * do another fixup. When the fixup has been done already we simply
	 * return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
	 */
handle_fault:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}

	if (ret)
		goto out_unlock;

	goto retry;

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  -  1 - success, lock taken;
 *  -  0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
	}

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  -  0 - uaddr contains val and hb has been locked;
 *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = *abs_time;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
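
/*
 * Illustrative userspace counterpart of futex_wait() (sketch only, not
 * part of this file): a waiter re-checks the value and parks itself via
 *
 *	while (atomic_load(&futval) == expected)
 *		syscall(SYS_futex, &futval, FUTEX_WAIT, expected, NULL, NULL, 0);
 *
 * which returns 0 on wakeup, or fails with EAGAIN (EWOULDBLOCK) when the
 * value no longer matches, mirroring the -EWOULDBLOCK path above.
 */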


static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too).
 *
 * Also serves as futex trylock_pi()'ing, with the corresponding semantics.
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Task is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
	 * serializes against futex_unlock_pi() as that does the exact same
	 * lock handoff sequence.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

	if (ret) {
		if (ret == 1)
			ret = 0;

		spin_lock(q.lock_ptr);
		goto no_block;
	}

	if (unlikely(to))
		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

	spin_lock(q.lock_ptr);
	/*
	 * If we failed to acquire the lock (signal/timeout), we must
	 * first acquire the hb->lock before removing the lock from the
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
	 * wait lists consistent.
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}
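
/*
 * Reader's note (sketch, not defined in this file): the userspace fast
 * path pairing with futex_lock_pi() above is an atomic 0 -> TID cmpxchg
 * of the futex word; only when that fails does userspace issue
 * futex(uaddr, FUTEX_LOCK_PI, ...) and end up in the slowpath here.
 */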

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;
	int ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

		get_pi_state(pi_state);
		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN) {
			put_futex_key(&key);
			goto retry;
		}
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_putkey;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
		spin_unlock(&hb->lock);
		goto pi_faulted;
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_faulted:
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  -  0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}
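
/*
 * Reader's note (sketch, not defined here): the waiter side pairing
 * with FUTEX_CMP_REQUEUE_PI is
 *	futex(cond, FUTEX_WAIT_REQUEUE_PI, cond_val, timeout, &mutex, bitset)
 * which lands in futex_wait_requeue_pi() below.
 */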

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following--
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  -  0 - On success;
 *  - <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);
	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

3346 3347 3348 3349 3350 3351 3352
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
3353
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3354 3355 3356 3357 3358 3359 3360 3361
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
3362 3363 3364
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
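		/*
		 * Illustrative transition (the TID value is made up): with
		 * a dying owner whose TID is 0x451 and waiters queued,
		 *
		 *	uval = FUTEX_WAITERS | 0x451		= 0x80000451
		 *	mval = FUTEX_WAITERS | FUTEX_OWNER_DIED	= 0xc0000000
		 *
		 * so the TID field is cleared and a woken waiter can see
		 * from FUTEX_OWNER_DIED that the previous holder exited.
		 */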
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
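
/*
 * Example of the tagging decoded by fetch_robust_entry() (the address is
 * made up): a user-space value of 0x7ffd2000c021 yields
 * entry == (void __user *)0x7ffd2000c020 and *pi == 1, i.e. the entry
 * describes a PI futex.
 */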

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
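/*
 * Roughly, the structure being walked looks like this (a sketch; the
 * actual lock layout is defined by the user-space threading library):
 *
 *	head->list.next -> entry1 -> entry2 -> ... -> &head->list
 *
 * where the futex word owned via each entry lives at
 * (void __user *)entry + head->futex_offset.
 */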
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


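/*
 * Illustrative user-space invocation (a sketch, not kernel code): a
 * plain FUTEX_WAIT with a one second relative timeout reaches do_futex()
 * via something like
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
 *		expected_val, &ts, NULL, 0);
 *
 * where the relative timeout is converted to an absolute ktime_t below.
 */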
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

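	/*
	 * Worked example (assuming num_possible_cpus() == 16): the default
	 * sizing above gives roundup_pow_of_two(256 * 16) == 4096 buckets;
	 * a non-power-of-two product such as 256 * 6 == 1536 rounds up to
	 * 2048. alloc_large_system_hash() may still adjust this, so
	 * futex_hashsize is recomputed from the returned shift.
	 */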
	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);