/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/refcount.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when the wait call
 * may return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. It then decrements them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets
 * (requeue_futex) will do the additional required waiter count housekeeping.
 * This is done for double_lock_hb() and double_unlock_hb(), respectively.
 */

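/*
 * Illustrative only, not used by the kernel build: a minimal sketch of the
 * user space side of the protocol described above, using the raw syscall
 * (the variable names and values here are an example, not part of this file):
 *
 *	uint32_t futex_word = 0;
 *
 *	// waiter: re-read the word, then sleep only while it is still 0
 *	uint32_t val = __atomic_load_n(&futex_word, __ATOMIC_SEQ_CST);
 *	if (val == 0)
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT, val, NULL, NULL, 0);
 *
 *	// waker: publish the new value, then wake one waiter
 *	__atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * If the waker's store lands between the waiter's load and its FUTEX_WAIT
 * call, the uval != val check in futex_wait() fails and the wait returns
 * -EWOULDBLOCK instead of sleeping - exactly the lost-wakeup protection
 * the ordering rules above exist to provide.
 */
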
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

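/*
 * For orientation, a condensed sketch (not a verbatim copy) of how these
 * flags are typically derived from the futex op word in do_futex() later
 * in this file:
 *
 *	unsigned int flags = 0;
 *
 *	if (!(op & FUTEX_PRIVATE_FLAG))
 *		flags |= FLAGS_SHARED;
 *	if (op & FUTEX_CLOCK_REALTIME)
 *		flags |= FLAGS_CLOCKRT;
 */
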
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}


/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *              FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but lets be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!refcount_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If PF_EXITPIDONE is not yet set, then try again.
	 */
	if (tsk && !(tsk->flags & PF_EXITPIDONE))
		return -EAGAIN;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
	 *  tsk->flags |= PF_EXITPIDONE;     } else {
	 *				       if (!(tsk->flags & PF_EXITPIDONE))
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	int err;
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup, after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add_safe(wake_q, p);
}

/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

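/*
 * For reference: the encoded_op word decoded below is built in user space
 * with the uapi FUTEX_OP() macro, i.e.
 *
 *	encoded_op = (op << 28) | (cmp << 24) | (oparg << 12) | cmparg
 *
 * For example, FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1) stores 0 to
 * *uaddr2 and - in addition to the unconditional wake on uaddr1 - wakes
 * waiters on uaddr2 if the old value was greater than 1. The example
 * values are illustrative, not taken from this file.
 */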
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		double_unlock_hb(hb1, hb2);

		if (!IS_ENABLED(CONFIG_MMU) ||
		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
			/*
			 * we don't get EFAULT from MMU faults if we don't have
			 * an MMU, but we might get them from range checking
			 */
			ret = op_ret;
			goto out_put_keys;
		}

		if (op_ret == -EFAULT) {
			ret = fault_in_user_writeable(uaddr2);
			if (ret)
				goto out_put_keys;
		}

		if (!(flags & FLAGS_SHARED)) {
			cond_resched();
			goto retry_private;
		}

		put_futex_key(&key2);
		put_futex_key(&key1);
		cond_resched();
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

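/*
 * Illustrative userspace sketch (not kernel code) of a FUTEX_WAKE_OP call;
 * the val2 slot of the futex() syscall carries nr_wake2 here. This is the
 * pattern glibc historically used to release an internal lock word and wake
 * a waiter in a single trip into the kernel. Names are hypothetical and
 * error handling is omitted.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long wake_and_unlock(unsigned int *waitq, unsigned int *lock)
 *	{
 *		// set *lock = 0; wake 1 waiter on waitq; if the old value of
 *		// *lock was > 1 (contended), also wake 1 waiter on lock.
 *		return syscall(SYS_futex, waitq, FUTEX_WAKE_OP, 1, 1, lock,
 *			       FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
 *	}
 */
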
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			get_pi_state(pi_state);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiters futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}

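/*
 * Illustrative userspace sketch (not kernel code): a condvar-style broadcast
 * built on FUTEX_CMP_REQUEUE, waking one waiter and requeueing the rest onto
 * the mutex word so they wake one at a time as the mutex is handed over.
 * "cond_seq" and "mutex_word" are hypothetical application variables; the
 * val2 slot of the futex() syscall carries nr_requeue. Error handling omitted.
 *
 *	#include <linux/futex.h>
 *	#include <limits.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long cond_broadcast(unsigned int *cond_seq,
 *				   unsigned int *mutex_word)
 *	{
 *		unsigned int expected = *cond_seq;
 *
 *		// wake 1, requeue up to INT_MAX waiters from cond_seq to
 *		// mutex_word, iff *cond_seq still equals "expected".
 *		return syscall(SYS_futex, cond_seq, FUTEX_CMP_REQUEUE, 1,
 *			       INT_MAX, mutex_word, expected);
 *	}
 */
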
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we removed it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
{
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, uninitialized_var(curval), newval;
	struct task_struct *oldowner, *newowner;
	u32 newtid;
	int ret, err = 0;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
	 */
retry:
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock; there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	err = get_futex_value_locked(&uval, uaddr);
	if (err)
		goto handle_err;

	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
		if (err)
			goto handle_err;

		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	return 0;

	/*
	 * In order to reschedule or handle a page fault, we need to drop the
	 * locks here. In the case of a fault, this gives the other task
	 * (either the highest priority waiter itself or the task which stole
	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
	 * are back from handling the fault we need to check the pi_state after
	 * reacquiring the locks and before trying to do another fixup. When
	 * the fixup has been done already we simply return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
	 */
handle_err:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	switch (err) {
	case -EFAULT:
		ret = fault_in_user_writeable(uaddr);
		break;

	case -EAGAIN:
		cond_resched();
		ret = 0;
		break;

	default:
		WARN_ON_ONCE(1);
		ret = err;
		break;
	}

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}

	if (ret)
		goto out_unlock;

	goto retry;

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  -  1 - success, lock taken;
 *  -  0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
	}

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  -  0 - uaddr contains val and hb has been locked;
 *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = *abs_time;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

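/*
 * Illustrative userspace sketch (not kernel code) of the wait side that the
 * ordering comment in futex_wait_setup() assumes: test the value, then sleep
 * only if it still holds. A deliberately minimal lock; error handling, the
 * contended-state optimization and FUTEX_PRIVATE_FLAG are omitted, and the
 * matching unlock must do "*f = 0" followed by an unconditional FUTEX_WAKE.
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void lock(atomic_uint *f)
 *	{
 *		unsigned int zero = 0;
 *
 *		while (!atomic_compare_exchange_weak(f, &zero, 1)) {
 *			// sleep only while *f is still 1; if the holder has
 *			// already stored 0, the kernel returns immediately.
 *			syscall(SYS_futex, f, FUTEX_WAIT, 1, NULL, NULL, 0);
 *			zero = 0;
 *		}
 *	}
 */
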

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex trylock_pi() slow path, with the corresponding
 * semantics.
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Task is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

	if (ret) {
		if (ret == 1)
			ret = 0;
		goto cleanup;
	}

	if (unlikely(to))
		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

cleanup:
	spin_lock(q.lock_ptr);
	/*
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
	 * first acquire the hb->lock before removing the lock from the
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
	 * lists consistent.
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;
	int ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

		get_pi_state(pi_state);
		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
		 *
		 * In particular; this forces __rt_mutex_start_proxy() to
		 * complete such that we're guaranteed to observe the
		 * rt_waiter. Also see the WARN in wake_futex_pi().
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN)
			goto pi_retry;
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_putkey;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
		spin_unlock(&hb->lock);
		switch (ret) {
		case -EFAULT:
			goto pi_faulted;

		case -EAGAIN:
			goto pi_retry;

		default:
			WARN_ON_ONCE(1);
			goto out_putkey;
		}
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_retry:
	put_futex_key(&key);
	cond_resched();
	goto retry;

pi_faulted:
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

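/*
 * Illustrative userspace sketch (not kernel code) of the PI protocol the two
 * functions above back up: the fast paths are pure cmpxchg on the futex word
 * and the kernel is entered only on contention. Names are hypothetical,
 * error handling is omitted, and "tid" is the caller's kernel TID (gettid()).
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void pi_lock(atomic_uint *f, unsigned int tid)
 *	{
 *		unsigned int zero = 0;
 *
 *		// 0 -> TID transition failed: enter futex_lock_pi().
 *		if (!atomic_compare_exchange_strong(f, &zero, tid))
 *			syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 *
 *	static void pi_unlock(atomic_uint *f, unsigned int tid)
 *	{
 *		unsigned int expected = tid;
 *
 *		// TID -> 0 transition failed (FUTEX_WAITERS or OWNER_DIED
 *		// set): the kernel must hand the lock to the top waiter.
 *		if (!atomic_compare_exchange_strong(f, &expected, 0))
 *			syscall(SYS_futex, f, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */
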
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  -  0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following--
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  -  0 - On success;
 *  - <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
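
/*
 * Illustrative userspace pairing for futex_wait_requeue_pi(): a condvar
 * built over a PI mutex, in the style glibc uses for pthread_cond_wait()
 * with a PRIO_INHERIT mutex. A sketch, not kernel code; 'cond',
 * 'mutex.futex', 'cond_val' and unlock_pi_mutex() are hypothetical
 * userspace names:
 *
 *	// waiter: drop the mutex, block on the condvar word; on return
 *	// the kernel has already requeued us to (or granted us) the mutex.
 *	unlock_pi_mutex(&mutex);
 *	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, cond_val,
 *		NULL, &mutex.futex, 0);
 *
 *	// broadcaster: wake one waiter, requeue the rest onto the PI
 *	// mutex so they contend with priority inheritance.
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1, (void *)INT_MAX,
 *		&mutex.futex, cond_val);
 */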

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
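
/*
 * A minimal userspace sketch of the registration described above
 * (illustrative only, not kernel code; 'struct my_mutex' and its layout
 * are hypothetical):
 *
 *	struct my_mutex {
 *		struct robust_list link;
 *		uint32_t futex;		// owner TID plus flag bits
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },	// empty list
 *		.futex_offset	 = offsetof(struct my_mutex, futex) -
 *				   offsetof(struct my_mutex, link),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * To lock: store &m->link in head.list_op_pending, cmpxchg the futex
 * word from 0 to gettid(), link m into head.list on success, then clear
 * list_op_pending. If the thread dies at any point after the cmpxchg,
 * exit_robust_list() below finds the lock either on the list or via
 * list_op_pending and marks it FUTEX_OWNER_DIED.
 */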

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
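
/*
 * Illustrative use (e.g. by a checkpoint or debugging tool; a sketch,
 * not kernel code, error handling omitted):
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *
 *	syscall(SYS_get_robust_list, pid, &head, &len);
 *	// 'head' points into the *target's* address space; read it via
 *	// ptrace() or /proc/<pid>/mem rather than dereferencing it.
 */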

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}
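
/*
 * Userspace counterpart (an illustrative sketch, not kernel code): after
 * the cmpxchg above, the lock word holds FUTEX_OWNER_DIED, possibly
 * FUTEX_WAITERS, and a zero TID, so the next locker can take over and
 * report the dead owner. 'load' and 'cmpxchg' stand in for the usual
 * C11 atomics:
 *
 *	uint32_t old = load(futex_word);
 *	if (!(old & FUTEX_TID_MASK) &&
 *	    cmpxchg(futex_word, old,
 *		    gettid() | (old & ~FUTEX_TID_MASK)) == old) {
 *		if (old & FUTEX_OWNER_DIED)
 *			return EOWNERDEAD;	// caller must repair state
 *		return 0;
 *	}
 */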

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}
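
/*
 * Illustrative userspace pairing for the plain FUTEX_WAIT/FUTEX_WAKE
 * commands dispatched above (a sketch, not kernel code; 'futex_word' is
 * a hypothetical shared uint32_t and error handling is omitted):
 *
 *	// consumer: sleep only while the word is still 0
 *	while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == 0)
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0,
 *			NULL, NULL, 0);
 *
 *	// producer: publish the value, then wake one waiter
 *	__atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, 1,
 *		NULL, NULL, 0);
 */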


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
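
/*
 * Note on the timeout handling above: FUTEX_WAIT passes a *relative*
 * timeout, which is converted to an absolute expiry here; the
 * FUTEX_WAIT_BITSET and PI commands already pass absolute times. A
 * 100ms bounded wait from userspace would look like (a sketch, not
 * kernel code):
 *
 *	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
 *
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0, &ts, NULL, 0);
 */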

#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);
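
/*
 * Sizing example (illustrative): with 8 possible CPUs the hash above is
 * sized to roundup_pow_of_two(256 * 8) = 2048 buckets, each with its own
 * spinlock and plist, spreading contention across unrelated futexes.
 */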