futex.c 108.9 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
12 13 14 15
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
16 17 18 19
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
E
Eric Dumazet 已提交
20 21 22
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
23 24 25 26
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
L
Linus Torvalds 已提交
27 28 29 30 31 32 33
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
34
#include <linux/compat.h>
L
Linus Torvalds 已提交
35 36 37
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
38
#include <linux/hugetlb.h>
C
Colin Cross 已提交
39
#include <linux/freezer.h>
M
Mike Rapoport 已提交
40
#include <linux/memblock.h>
41
#include <linux/fault-inject.h>
42

43
#include <asm/futex.h>
L
Linus Torvalds 已提交
44

45
#include "locking/rtmutex_common.h"
46

47
/*
48 49 50 51
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
52 53 54 55
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
56 57 58
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
59 60
 *
 * The waker side modifies the user space value of the futex and calls
61 62 63
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
64
 *
65 66 67 68 69
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and simply return. In order for this
 * optimization to work, ordering guarantees must exist so that the waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
91 92 93 94 95
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
96 97 98
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
99
 *
100
 *   waiters++; (a)
101 102 103 104 105 106 107 108 109 110
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
111
 *   if (uval == val)
112
 *     queue();
113
 *     unlock(hash_bucket(futex));
114 115
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
116 117
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
118
 *
119 120
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
121
 * to futex and the waiters read (see hb_waiters_pending()).
122 123 124 125 126 127 128 129 130 131 132 133
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
134 135 136 137 138 139 140 141 142 143 144
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. It then decrements them again  after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
145 146
 */

147 148 149 150
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
151
#endif
152

153 154 155 156
/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
157 158 159 160 161 162 163 164 165
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
166 167 168
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
185
	refcount_t refcount;
186 187

	union futex_key key;
188
} __randomize_layout;
189

190 191
/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
192
 * @list:		priority-sorted list of tasks waiting on this futex
193 194 195 196 197 198 199 200
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
201
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
L
Linus Torvalds 已提交
202 203 204
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
P
Pierre Peiffer 已提交
205
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
206
 * The order of wakeup is always to make the first condition true, then
207 208 209 210
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
L
Linus Torvalds 已提交
211 212
 */
struct futex_q {
P
Pierre Peiffer 已提交
213
	struct plist_node list;
L
Linus Torvalds 已提交
214

215
	struct task_struct *task;
L
Linus Torvalds 已提交
216 217
	spinlock_t *lock_ptr;
	union futex_key key;
218
	struct futex_pi_state *pi_state;
219
	struct rt_mutex_waiter *rt_waiter;
220
	union futex_key *requeue_pi_key;
221
	u32 bitset;
222
} __randomize_layout;
L
Linus Torvalds 已提交
223

224 225 226 227 228 229
static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

L
Linus Torvalds 已提交
230
/*
D
Darren Hart 已提交
231 232 233
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
L
Linus Torvalds 已提交
234 235
 */
struct futex_hash_bucket {
236
	atomic_t waiters;
P
Pierre Peiffer 已提交
237 238
	spinlock_t lock;
	struct plist_head chain;
239
} ____cacheline_aligned_in_smp;
L
Linus Torvalds 已提交
240

241 242 243 244 245 246 247 248 249 250 251
/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)
252

L
Linus Torvalds 已提交
253

254 255 256 257 258 259 260 261
/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

262
	bool ignore_private;
263 264
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
265
	.ignore_private = false,
266 267 268 269 270 271 272 273
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

274
static bool should_fail_futex(bool fshared)
275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

294 295
	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
296 297 298 299 300 301 302 303 304 305 306 307 308 309
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

310 311 312 313 314 315
#ifdef CONFIG_COMPAT
static void compat_exit_robust_list(struct task_struct *curr);
#else
static inline void compat_exit_robust_list(struct task_struct *curr) { }
#endif

316 317 318 319
/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
320 321
{
#ifdef CONFIG_SMP
322
	atomic_inc(&hb->waiters);
323
	/*
324
	 * Full barrier (A), see the ordering comment above.
325
	 */
326
	smp_mb__after_atomic();
327 328 329 330 331 332 333 334 335 336 337 338 339
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}
340

341 342 343
static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
344 345 346 347
	/*
	 * Full barrier (B), see the ordering comment above.
	 */
	smp_mb();
348
	return atomic_read(&hb->waiters);
349
#else
350
	return 1;
351 352 353
#endif
}

354 355 356 357 358 359
/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
L
Linus Torvalds 已提交
360 361 362
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
T
Thomas Gleixner 已提交
363
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
L
Linus Torvalds 已提交
364
			  key->both.offset);
T
Thomas Gleixner 已提交
365

366
	return &futex_queues[hash & (futex_hashsize - 1)];
L
Linus Torvalds 已提交
367 368
}

369 370 371 372 373 374

/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
L
Linus Torvalds 已提交
375 376 377 378
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
379 380
	return (key1 && key2
		&& key1->both.word == key2->both.word
L
Linus Torvalds 已提交
381 382 383 384
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

385 386 387 388 389
enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406
/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
static inline struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

407 408 409
	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
410 411 412 413 414 415 416 417 418
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}

419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458
/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that match_futex() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}

E
Eric Dumazet 已提交
459
/**
460 461
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
462
 * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
463
 * @key:	address where result is stored.
464 465
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *              FUTEX_WRITE)
E
Eric Dumazet 已提交
466
 *
467 468
 * Return: a negative error code or 0
 *
469
 * The key words are stored in @key on success.
L
Linus Torvalds 已提交
470
 *
471
 * For shared mappings (when @fshared), the key is:
472
 *
473
 *   ( inode->i_sequence, page->index, offset_within_page )
474
 *
475 476 477
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
478
 *
479 480 481 482
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
L
Linus Torvalds 已提交
483
 *
D
Darren Hart 已提交
484
 * lock_page() might sleep, the caller should not hold a spinlock.
L
Linus Torvalds 已提交
485
 */
486 487
static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
			 enum futex_access rw)
L
Linus Torvalds 已提交
488
{
489
	unsigned long address = (unsigned long)uaddr;
L
Linus Torvalds 已提交
490
	struct mm_struct *mm = current->mm;
491
	struct page *page, *tail;
492
	struct address_space *mapping;
493
	int err, ro = 0;
L
Linus Torvalds 已提交
494 495 496 497

	/*
	 * The futex address must be "naturally" aligned.
	 */
498
	key->both.offset = address % PAGE_SIZE;
E
Eric Dumazet 已提交
499
	if (unlikely((address % sizeof(u32)) != 0))
L
Linus Torvalds 已提交
500
		return -EINVAL;
501
	address -= key->both.offset;
L
Linus Torvalds 已提交
502

503
	if (unlikely(!access_ok(uaddr, sizeof(u32))))
504 505
		return -EFAULT;

506 507 508
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

E
Eric Dumazet 已提交
509 510 511 512 513 514 515 516 517 518 519 520
	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}
L
Linus Torvalds 已提交
521

522
again:
523
	/* Ignore any VERIFY_READ mapping (futex common case) */
524
	if (unlikely(should_fail_futex(true)))
525 526
		return -EFAULT;

527
	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
528 529 530 531
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
532
	if (err == -EFAULT && rw == FUTEX_READ) {
533 534 535
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
536 537
	if (err < 0)
		return err;
538 539
	else
		err = 0;
540

541 542 543 544 545 546 547 548 549 550
	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessarily serialization
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable
551 552 553 554 555 556 557
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
558
	 */
559
	tail = page;
560 561 562
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

563
	/*
564
	 * If page->mapping is NULL, then it cannot be a PageAnon
565 566 567 568 569 570 571 572 573 574 575
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
576
	 * an unlikely race, but we do need to retry for page->mapping.
577
	 */
578 579 580 581 582 583 584 585 586 587
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
588 589
		unlock_page(page);
		put_page(page);
590

591 592
		if (shmem_swizzled)
			goto again;
593

594
		return -EFAULT;
595
	}
L
Linus Torvalds 已提交
596 597 598 599

	/*
	 * Private mappings are handled in a simple way.
	 *
600 601 602
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
L
Linus Torvalds 已提交
603 604
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
605
	 * the object not the particular process.
L
Linus Torvalds 已提交
606
	 */
607
	if (PageAnon(page)) {
608 609 610 611
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
612
		if (unlikely(should_fail_futex(true)) || ro) {
613 614 615 616
			err = -EFAULT;
			goto out;
		}

617
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
L
Linus Torvalds 已提交
618
		key->private.mm = mm;
619
		key->private.address = address;
620

621
	} else {
622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

652
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
653
		key->shared.i_seq = get_inode_sequence_number(inode);
654
		key->shared.pgoff = basepage_index(tail);
655
		rcu_read_unlock();
L
Linus Torvalds 已提交
656 657
	}

658
out:
659
	put_page(page);
660
	return err;
L
Linus Torvalds 已提交
661 662
}

663 664
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
665 666 667 668 669
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
670
 * We have no generic implementation of a non-destructive write to the
671 672 673 674 675 676
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
677 678 679
	struct mm_struct *mm = current->mm;
	int ret;

680
	mmap_read_lock(mm);
681
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
682
			       FAULT_FLAG_WRITE, NULL);
683
	mmap_read_unlock(mm);
684

685 686 687
	return ret < 0 ? ret : 0;
}

688 689
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
690 691
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
692 693 694 695 696 697 698 699 700 701 702 703 704 705 706
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

707 708
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
T
Thomas Gleixner 已提交
709
{
710
	int ret;
T
Thomas Gleixner 已提交
711 712

	pagefault_disable();
713
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
T
Thomas Gleixner 已提交
714 715
	pagefault_enable();

716
	return ret;
T
Thomas Gleixner 已提交
717 718 719
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
L
Linus Torvalds 已提交
720 721 722
{
	int ret;

723
	pagefault_disable();
724
	ret = __get_user(*dest, from);
725
	pagefault_enable();
L
Linus Torvalds 已提交
726 727 728 729

	return ret ? -EFAULT : 0;
}

730 731 732 733 734 735 736 737 738 739 740

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

741
	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
742 743 744 745 746 747 748

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
749
	refcount_set(&pi_state->refcount, 1);
750
	pi_state->key = FUTEX_KEY_INIT;
751 752 753 754 755 756

	current->pi_state_cache = pi_state;

	return 0;
}

P
Peter Zijlstra 已提交
757
static struct futex_pi_state *alloc_pi_state(void)
758 759 760 761 762 763 764 765 766
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

P
Peter Zijlstra 已提交
767 768
static void get_pi_state(struct futex_pi_state *pi_state)
{
769
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
P
Peter Zijlstra 已提交
770 771
}

772
/*
773 774
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
775
 */
776
static void put_pi_state(struct futex_pi_state *pi_state)
777
{
778 779 780
	if (!pi_state)
		return;

781
	if (!refcount_dec_and_test(&pi_state->refcount))
782 783 784 785 786 787 788
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
789
		struct task_struct *owner;
790

791 792 793 794 795 796 797 798 799
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
800 801
	}

802
	if (current->pi_state_cache) {
803
		kfree(pi_state);
804
	} else {
805 806 807 808 809 810
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
811
		refcount_set(&pi_state->refcount, 1);
812 813 814 815
		current->pi_state_cache = pi_state;
	}
}

816 817
#ifdef CONFIG_FUTEX_PI

818 819 820 821 822
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
823
static void exit_pi_state_list(struct task_struct *curr)
824 825 826
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
827
	struct futex_hash_bucket *hb;
828
	union futex_key key = FUTEX_KEY_INIT;
829

830 831
	if (!futex_cmpxchg_enabled)
		return;
832 833 834
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
835
	 * versus waiters unqueueing themselves:
836
	 */
837
	raw_spin_lock_irq(&curr->pi_lock);
838 839 840 841
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
842
		hb = hash_futex(&key);
843 844 845 846 847 848 849 850 851 852 853

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * its possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
854
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
855 856 857 858 859
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
860
		raw_spin_unlock_irq(&curr->pi_lock);
861 862

		spin_lock(&hb->lock);
863 864
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
865 866 867 868
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
869
		if (head->next != next) {
870
			/* retain curr->pi_lock for the loop invariant */
871
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
872
			spin_unlock(&hb->lock);
873
			put_pi_state(pi_state);
874 875 876 877
			continue;
		}

		WARN_ON(pi_state->owner != curr);
878 879
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
880 881
		pi_state->owner = NULL;

882
		raw_spin_unlock(&curr->pi_lock);
883
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
884 885
		spin_unlock(&hb->lock);

886 887 888
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

889
		raw_spin_lock_irq(&curr->pi_lock);
890
	}
891
	raw_spin_unlock_irq(&curr->pi_lock);
892
}
893 894
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
895 896
#endif

897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944
/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
P
Peter Zijlstra 已提交
945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
978
 */
979 980 981 982 983 984

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
P
Peter Zijlstra 已提交
985 986
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
987
			      struct futex_pi_state **ps)
988
{
989
	pid_t pid = uval & FUTEX_TID_MASK;
990 991
	u32 uval2;
	int ret;
992

993 994 995 996 997
	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;
998

P
Peter Zijlstra 已提交
999 1000 1001 1002 1003 1004
	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
1005 1006 1007 1008 1009
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
P
Peter Zijlstra 已提交
1010
	 */
1011
	WARN_ON(!refcount_read(&pi_state->refcount));
1012

P
Peter Zijlstra 已提交
1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

1031 1032 1033 1034
	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
1035
		/*
1036 1037 1038
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
1039
		 */
1040
		if (!pi_state->owner) {
1041
			/*
1042 1043
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
1044
			 */
1045
			if (pid)
P
Peter Zijlstra 已提交
1046
				goto out_einval;
1047
			/*
1048
			 * Take a ref on the state and return success. [4]
1049
			 */
P
Peter Zijlstra 已提交
1050
			goto out_attach;
1051
		}
1052 1053

		/*
1054 1055 1056 1057 1058 1059 1060 1061
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
P
Peter Zijlstra 已提交
1062
			goto out_attach;
1063 1064 1065 1066
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
1067
		 */
1068
		if (!pi_state->owner)
P
Peter Zijlstra 已提交
1069
			goto out_einval;
1070 1071
	}

1072 1073 1074 1075 1076 1077
	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
P
Peter Zijlstra 已提交
1078 1079 1080
		goto out_einval;

out_attach:
P
Peter Zijlstra 已提交
1081
	get_pi_state(pi_state);
P
Peter Zijlstra 已提交
1082
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1083 1084
	*ps = pi_state;
	return 0;
P
Peter Zijlstra 已提交
1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
1101 1102
}

T
Thomas Gleixner 已提交
1103 1104
/**
 * wait_for_owner_exiting - Block until the owner has exited
1105
 * @ret: owner's current futex lock status
T
Thomas Gleixner 已提交
1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

T
Thomas Gleixner 已提交
1134 1135 1136 1137 1138 1139
static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
1140 1141
	 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
	 * caller that the alleged owner is busy.
T
Thomas Gleixner 已提交
1142
	 */
1143
	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
1144
		return -EBUSY;
T
Thomas Gleixner 已提交
1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
1162 1163 1164
	 *  tsk->futex_state =               } else {
	 *	FUTEX_STATE_DEAD;              if (tsk->futex_state !=
	 *					  FUTEX_STATE_DEAD)
T
Thomas Gleixner 已提交
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

1190 1191 1192 1193
/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
T
Thomas Gleixner 已提交
1194
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
T
Thomas Gleixner 已提交
1195 1196
			      struct futex_pi_state **ps,
			      struct task_struct **exiting)
1197 1198
{
	pid_t pid = uval & FUTEX_TID_MASK;
1199 1200
	struct futex_pi_state *pi_state;
	struct task_struct *p;
1201

1202
	/*
1203
	 * We are the first waiter - try to look up the real owner and attach
1204
	 * the new pi_state to it, but bail out when TID = 0 [1]
T
Thomas Gleixner 已提交
1205 1206 1207
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
1208
	 */
1209
	if (!pid)
T
Thomas Gleixner 已提交
1210
		return -EAGAIN;
1211
	p = find_get_task_by_vpid(pid);
1212
	if (!p)
T
Thomas Gleixner 已提交
1213
		return handle_exit_race(uaddr, uval, NULL);
1214

1215
	if (unlikely(p->flags & PF_KTHREAD)) {
1216 1217 1218 1219
		put_task_struct(p);
		return -EPERM;
	}

1220
	/*
1221 1222 1223
	 * We need to look at the task state to figure out, whether the
	 * task is exiting. To protect against the change of the task state
	 * in futex_exit_release(), we do this protected by p->pi_lock:
1224
	 */
1225
	raw_spin_lock_irq(&p->pi_lock);
1226
	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
1227
		/*
1228 1229 1230
		 * The task is on the way out. When the futex state is
		 * FUTEX_STATE_DEAD, we know that the task has finished
		 * the cleanup:
1231
		 */
T
Thomas Gleixner 已提交
1232
		int ret = handle_exit_race(uaddr, uval, p);
1233

1234
		raw_spin_unlock_irq(&p->pi_lock);
T
Thomas Gleixner 已提交
1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247
		/*
		 * If the owner task is between FUTEX_STATE_EXITING and
		 * FUTEX_STATE_DEAD then store the task pointer and keep
		 * the reference on the task struct. The calling code will
		 * drop all locks, wait for the task to reach
		 * FUTEX_STATE_DEAD and then drop the refcount. This is
		 * required to prevent a live lock when the current task
		 * preempted the exiting task between the two states.
		 */
		if (ret == -EBUSY)
			*exiting = p;
		else
			put_task_struct(p);
1248 1249
		return ret;
	}
1250

1251 1252
	/*
	 * No existing pi state. First waiter. [2]
P
Peter Zijlstra 已提交
1253 1254 1255
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
1256
	 */
1257 1258 1259
	pi_state = alloc_pi_state();

	/*
1260
	 * Initialize the pi_mutex in locked state and make @p
1261 1262 1263 1264 1265
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
P
Pierre Peiffer 已提交
1266
	pi_state->key = *key;
1267

1268
	WARN_ON(!list_empty(&pi_state->list));
1269
	list_add(&pi_state->list, &p->pi_state_list);
1270 1271 1272 1273
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
1274
	pi_state->owner = p;
1275
	raw_spin_unlock_irq(&p->pi_lock);
1276 1277 1278

	put_task_struct(p);

P
Pierre Peiffer 已提交
1279
	*ps = pi_state;
1280 1281 1282 1283

	return 0;
}

P
Peter Zijlstra 已提交
1284 1285
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
T
Thomas Gleixner 已提交
1286 1287
			   union futex_key *key, struct futex_pi_state **ps,
			   struct task_struct **exiting)
1288
{
1289
	struct futex_q *top_waiter = futex_top_waiter(hb, key);
1290 1291 1292 1293 1294

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
1295
	if (top_waiter)
P
Peter Zijlstra 已提交
1296
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1297 1298 1299 1300 1301

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
T
Thomas Gleixner 已提交
1302
	return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
1303 1304
}

1305 1306
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
1307
	int err;
1308
	u32 curval;
1309

1310 1311 1312
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

1313 1314 1315
	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;
1316

P
Peter Zijlstra 已提交
1317
	/* If user space value changed, let the caller retry */
1318 1319 1320
	return curval != uval ? -EAGAIN : 0;
}

1321
/**
1322
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1323 1324 1325 1326 1327 1328 1329
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
T
Thomas Gleixner 已提交
1330 1331
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
1332
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1333
 *
1334
 * Return:
1335 1336 1337
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
1338 1339
 *
 * The hb->lock and futex_key refs shall be held by the caller.
T
Thomas Gleixner 已提交
1340 1341 1342 1343
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
1344 1345 1346 1347
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
T
Thomas Gleixner 已提交
1348 1349 1350
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters)
1351
{
1352
	u32 uval, newval, vpid = task_pid_vnr(task);
1353
	struct futex_q *top_waiter;
1354
	int ret;
1355 1356

	/*
1357 1358
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
1359
	 */
1360
	if (get_futex_value_locked(&uval, uaddr))
1361 1362
		return -EFAULT;

1363 1364 1365
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

1366 1367 1368
	/*
	 * Detect deadlocks.
	 */
1369
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1370 1371
		return -EDEADLK;

1372 1373 1374
	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

1375
	/*
1376 1377
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
1378
	 */
1379 1380
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
P
Peter Zijlstra 已提交
1381
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1382 1383

	/*
1384 1385 1386 1387
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
1388
	 */
1389
	if (!(uval & FUTEX_TID_MASK)) {
1390
		/*
1391 1392
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
1393
		 */
1394 1395
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;
1396

1397 1398 1399 1400 1401 1402 1403 1404
		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}
1405 1406

	/*
1407 1408 1409
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
1410
	 */
1411 1412 1413 1414
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
1415
	/*
1416 1417 1418
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
1419
	 */
T
Thomas Gleixner 已提交
1420
	return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
1421 1422
}

1423 1424 1425 1426 1427 1428 1429 1430 1431 1432
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

1433
	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1434
		return;
1435
	lockdep_assert_held(q->lock_ptr);
1436 1437 1438

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
1439
	hb_waiters_dec(hb);
1440 1441
}

L
Linus Torvalds 已提交
1442 1443
/*
 * The hash bucket lock must be held when this is called.
1444 1445 1446
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
L
Linus Torvalds 已提交
1447
 */
1448
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
L
Linus Torvalds 已提交
1449
{
T
Thomas Gleixner 已提交
1450 1451
	struct task_struct *p = q->task;

1452 1453 1454
	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

1455
	get_task_struct(p);
1456
	__unqueue_futex(q);
L
Linus Torvalds 已提交
1457
	/*
1458 1459 1460 1461 1462
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
L
Linus Torvalds 已提交
1463
	 */
1464
	smp_store_release(&q->lock_ptr, NULL);
1465 1466 1467

	/*
	 * Queue the task for later wakeup for after we've released
1468
	 * the hb->lock.
1469
	 */
1470
	wake_q_add_safe(wake_q, p);
L
Linus Torvalds 已提交
1471 1472
}

1473 1474 1475 1476
/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1477
{
1478
	u32 curval, newval;
1479
	struct task_struct *new_owner;
P
Peter Zijlstra 已提交
1480
	bool postunlock = false;
1481
	DEFINE_WAKE_Q(wake_q);
1482
	int ret = 0;
1483 1484

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1485
	if (WARN_ON_ONCE(!new_owner)) {
1486
		/*
1487
		 * As per the comment in futex_unlock_pi() this should not happen.
1488 1489 1490 1491 1492 1493 1494 1495
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
1496
	}
1497 1498

	/*
1499 1500 1501
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
1502
	 */
1503
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1504

1505 1506 1507
	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

1508 1509
	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520
		/*
		 * If a unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
P
Peter Zijlstra 已提交
1521

1522 1523
	if (ret)
		goto out_unlock;
1524

1525 1526 1527 1528 1529
	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

1530
	raw_spin_lock(&pi_state->owner->pi_lock);
1531 1532
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
1533
	raw_spin_unlock(&pi_state->owner->pi_lock);
1534

1535
	raw_spin_lock(&new_owner->pi_lock);
1536
	WARN_ON(!list_empty(&pi_state->list));
1537 1538
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
1539
	raw_spin_unlock(&new_owner->pi_lock);
1540

P
Peter Zijlstra 已提交
1541
	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1542

1543
out_unlock:
1544 1545
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

P
Peter Zijlstra 已提交
1546 1547
	if (postunlock)
		rt_mutex_postunlock(&wake_q);
1548

1549
	return ret;
1550 1551
}

I
Ingo Molnar 已提交
1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

D
Darren Hart 已提交
1568 1569 1570
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
1571
	spin_unlock(&hb1->lock);
1572 1573
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
D
Darren Hart 已提交
1574 1575
}

L
Linus Torvalds 已提交
1576
/*
D
Darren Hart 已提交
1577
 * Wake up waiters matching bitset queued on this futex (uaddr).
L
Linus Torvalds 已提交
1578
 */
1579 1580
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
L
Linus Torvalds 已提交
1581
{
1582
	struct futex_hash_bucket *hb;
L
Linus Torvalds 已提交
1583
	struct futex_q *this, *next;
1584
	union futex_key key = FUTEX_KEY_INIT;
L
Linus Torvalds 已提交
1585
	int ret;
1586
	DEFINE_WAKE_Q(wake_q);
L
Linus Torvalds 已提交
1587

1588 1589 1590
	if (!bitset)
		return -EINVAL;

1591
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
L
Linus Torvalds 已提交
1592
	if (unlikely(ret != 0))
A
André Almeida 已提交
1593
		return ret;
L
Linus Torvalds 已提交
1594

1595
	hb = hash_futex(&key);
1596 1597 1598

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
A
André Almeida 已提交
1599
		return ret;
1600

1601
	spin_lock(&hb->lock);
L
Linus Torvalds 已提交
1602

J
Jason Low 已提交
1603
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
L
Linus Torvalds 已提交
1604
		if (match_futex (&this->key, &key)) {
1605
			if (this->pi_state || this->rt_waiter) {
1606 1607 1608
				ret = -EINVAL;
				break;
			}
1609 1610 1611 1612 1613

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

1614
			mark_wake_futex(&wake_q, this);
L
Linus Torvalds 已提交
1615 1616 1617 1618 1619
			if (++ret >= nr_wake)
				break;
		}
	}

1620
	spin_unlock(&hb->lock);
1621
	wake_up_q(&wake_q);
L
Linus Torvalds 已提交
1622 1623 1624
	return ret;
}

1625 1626 1627 1628
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
1629 1630
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1631 1632 1633
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1634 1635 1636 1637 1638 1639 1640 1641 1642 1643
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
1644 1645 1646
		oparg = 1 << oparg;
	}

1647
	pagefault_disable();
1648
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1649
	pagefault_enable();
1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}

1671 1672 1673 1674
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
1675
static int
1676
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1677
	      int nr_wake, int nr_wake2, int op)
1678
{
1679
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1680
	struct futex_hash_bucket *hb1, *hb2;
1681
	struct futex_q *this, *next;
D
Darren Hart 已提交
1682
	int ret, op_ret;
1683
	DEFINE_WAKE_Q(wake_q);
1684

D
Darren Hart 已提交
1685
retry:
1686
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1687
	if (unlikely(ret != 0))
A
André Almeida 已提交
1688
		return ret;
1689
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1690
	if (unlikely(ret != 0))
A
André Almeida 已提交
1691
		return ret;
1692

1693 1694
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
1695

D
Darren Hart 已提交
1696
retry_private:
T
Thomas Gleixner 已提交
1697
	double_lock_hb(hb1, hb2);
1698
	op_ret = futex_atomic_op_inuser(op, uaddr2);
1699
	if (unlikely(op_ret < 0)) {
D
Darren Hart 已提交
1700
		double_unlock_hb(hb1, hb2);
1701

1702 1703 1704 1705 1706 1707
		if (!IS_ENABLED(CONFIG_MMU) ||
		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
			/*
			 * we don't get EFAULT from MMU faults if we don't have
			 * an MMU, but we might get them from range checking
			 */
1708
			ret = op_ret;
A
André Almeida 已提交
1709
			return ret;
1710 1711
		}

1712 1713 1714
		if (op_ret == -EFAULT) {
			ret = fault_in_user_writeable(uaddr2);
			if (ret)
A
André Almeida 已提交
1715
				return ret;
1716
		}
1717

1718 1719
		if (!(flags & FLAGS_SHARED)) {
			cond_resched();
D
Darren Hart 已提交
1720
			goto retry_private;
1721
		}
D
Darren Hart 已提交
1722

1723
		cond_resched();
D
Darren Hart 已提交
1724
		goto retry;
1725 1726
	}

J
Jason Low 已提交
1727
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1728
		if (match_futex (&this->key, &key1)) {
1729 1730 1731 1732
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
1733
			mark_wake_futex(&wake_q, this);
1734 1735 1736 1737 1738 1739 1740
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
J
Jason Low 已提交
1741
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1742
			if (match_futex (&this->key, &key2)) {
1743 1744 1745 1746
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
1747
				mark_wake_futex(&wake_q, this);
1748 1749 1750 1751 1752 1753 1754
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

1755
out_unlock:
D
Darren Hart 已提交
1756
	double_unlock_hb(hb1, hb2);
1757
	wake_up_q(&wake_q);
1758 1759 1760
	return ret;
}

D
Darren Hart 已提交
1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
1779 1780
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
1781
		plist_add(&q->list, &hb2->chain);
D
Darren Hart 已提交
1782 1783 1784 1785 1786
		q->lock_ptr = &hb2->lock;
	}
	q->key = *key2;
}

1787 1788
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1789 1790 1791
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
1792 1793 1794 1795 1796
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1797 1798 1799
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
1800 1801
 */
static inline
1802 1803
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
1804 1805 1806
{
	q->key = *key;

1807
	__unqueue_futex(q);
1808 1809 1810 1811

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

1812 1813
	q->lock_ptr = &hb->lock;

T
Thomas Gleixner 已提交
1814
	wake_up_state(q->task, TASK_NORMAL);
1815 1816 1817 1818
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1819 1820 1821 1822 1823 1824
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
T
Thomas Gleixner 已提交
1825 1826
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
1827
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1828 1829
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1830 1831 1832
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
1833
 *
T
Thomas Gleixner 已提交
1834 1835 1836 1837
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
1838
 * Return:
1839 1840 1841
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
1842
 */
T
Thomas Gleixner 已提交
1843 1844 1845 1846 1847
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
1848
{
1849
	struct futex_q *top_waiter = NULL;
1850
	u32 curval;
1851
	int ret, vpid;
1852 1853 1854 1855

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

1856 1857 1858
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

1859 1860 1861 1862 1863 1864 1865 1866
	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
1867 1868 1869 1870 1871 1872
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

1873 1874 1875 1876
	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

1877
	/*
1878 1879 1880
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
1881
	 */
1882
	vpid = task_pid_vnr(top_waiter->task);
1883
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
T
Thomas Gleixner 已提交
1884
				   exiting, set_waiters);
1885
	if (ret == 1) {
1886
		requeue_pi_wake_futex(top_waiter, key2, hb2);
1887 1888
		return vpid;
	}
1889 1890 1891 1892 1893
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1894
 * @uaddr1:	source futex user address
1895
 * @flags:	futex flags (FLAGS_SHARED, etc.)
1896 1897 1898 1899 1900
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1901
 *		pi futex (pi to pi requeue is not supported)
1902 1903 1904 1905
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
1906
 * Return:
1907 1908
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
L
Linus Torvalds 已提交
1909
 */
1910 1911 1912
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
L
Linus Torvalds 已提交
1913
{
1914
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1915
	int task_count = 0, ret;
1916
	struct futex_pi_state *pi_state = NULL;
1917
	struct futex_hash_bucket *hb1, *hb2;
L
Linus Torvalds 已提交
1918
	struct futex_q *this, *next;
1919
	DEFINE_WAKE_Q(wake_q);
1920

1921 1922 1923
	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

1924 1925 1926 1927 1928 1929 1930 1931 1932
	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

1933
	if (requeue_pi) {
1934 1935 1936 1937 1938 1939 1940
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}
L
Linus Torvalds 已提交
1960

1961
retry:
1962
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
L
Linus Torvalds 已提交
1963
	if (unlikely(ret != 0))
A
André Almeida 已提交
1964
		return ret;
1965
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1966
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
L
Linus Torvalds 已提交
1967
	if (unlikely(ret != 0))
A
André Almeida 已提交
1968
		return ret;
L
Linus Torvalds 已提交
1969

1970 1971 1972 1973
	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
A
André Almeida 已提交
1974 1975
	if (requeue_pi && match_futex(&key1, &key2))
		return -EINVAL;
1976

1977 1978
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
L
Linus Torvalds 已提交
1979

D
Darren Hart 已提交
1980
retry_private:
1981
	hb_waiters_inc(hb2);
I
Ingo Molnar 已提交
1982
	double_lock_hb(hb1, hb2);
L
Linus Torvalds 已提交
1983

1984 1985
	if (likely(cmpval != NULL)) {
		u32 curval;
L
Linus Torvalds 已提交
1986

1987
		ret = get_futex_value_locked(&curval, uaddr1);
L
Linus Torvalds 已提交
1988 1989

		if (unlikely(ret)) {
D
Darren Hart 已提交
1990
			double_unlock_hb(hb1, hb2);
1991
			hb_waiters_dec(hb2);
L
Linus Torvalds 已提交
1992

1993
			ret = get_user(curval, uaddr1);
D
Darren Hart 已提交
1994
			if (ret)
A
André Almeida 已提交
1995
				return ret;
L
Linus Torvalds 已提交
1996

1997
			if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
1998
				goto retry_private;
L
Linus Torvalds 已提交
1999

D
Darren Hart 已提交
2000
			goto retry;
L
Linus Torvalds 已提交
2001
		}
2002
		if (curval != *cmpval) {
L
Linus Torvalds 已提交
2003 2004 2005 2006 2007
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

2008
	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
T
Thomas Gleixner 已提交
2009 2010
		struct task_struct *exiting = NULL;

2011 2012 2013 2014 2015 2016
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
2017
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
T
Thomas Gleixner 已提交
2018 2019
						 &key2, &pi_state,
						 &exiting, nr_requeue);
2020 2021 2022 2023 2024

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
2025 2026
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
2027 2028
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
2029
		 */
2030
		if (ret > 0) {
2031 2032
			WARN_ON(pi_state);
			task_count++;
2033
			/*
2034 2035 2036 2037 2038 2039 2040 2041 2042 2043
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
2044
			 */
T
Thomas Gleixner 已提交
2045 2046
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
					      &pi_state, &exiting);
2047 2048 2049 2050
		}

		switch (ret) {
		case 0:
2051
			/* We hold a reference on the pi state. */
2052
			break;
2053 2054

			/* If the above failed, then pi_state is NULL */
2055 2056
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
2057
			hb_waiters_dec(hb2);
2058
			ret = fault_in_user_writeable(uaddr2);
2059 2060
			if (!ret)
				goto retry;
A
André Almeida 已提交
2061
			return ret;
2062
		case -EBUSY:
2063
		case -EAGAIN:
2064 2065
			/*
			 * Two reasons for this:
2066
			 * - EBUSY: Owner is exiting and we just wait for the
2067
			 *   exit to complete.
2068
			 * - EAGAIN: The user space value changed.
2069
			 */
2070
			double_unlock_hb(hb1, hb2);
2071
			hb_waiters_dec(hb2);
T
Thomas Gleixner 已提交
2072 2073 2074 2075 2076 2077
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
2078 2079 2080 2081 2082 2083 2084
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

J
Jason Low 已提交
2085
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2086 2087 2088 2089
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
L
Linus Torvalds 已提交
2090
			continue;
2091

2092 2093 2094
		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
2095 2096 2097
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
2098 2099
		 */
		if ((requeue_pi && !this->rt_waiter) ||
2100 2101
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
2102 2103 2104
			ret = -EINVAL;
			break;
		}
2105 2106 2107 2108 2109 2110 2111

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
2112
			mark_wake_futex(&wake_q, this);
2113 2114
			continue;
		}
L
Linus Torvalds 已提交
2115

2116 2117 2118 2119 2120 2121
		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

2122 2123 2124 2125 2126
		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
2127 2128 2129 2130 2131
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
P
Peter Zijlstra 已提交
2132
			get_pi_state(pi_state);
2133 2134 2135
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
2136
							this->task);
2137
			if (ret == 1) {
2138 2139 2140 2141 2142 2143 2144 2145
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
2146
				requeue_pi_wake_futex(this, &key2, hb2);
2147 2148
				continue;
			} else if (ret) {
2149 2150 2151 2152 2153 2154 2155 2156
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiters futex_q
				 * object.
				 */
2157
				this->pi_state = NULL;
2158
				put_pi_state(pi_state);
2159 2160 2161 2162 2163
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
2164
			}
L
Linus Torvalds 已提交
2165
		}
2166
		requeue_futex(this, hb1, hb2, &key2);
L
Linus Torvalds 已提交
2167 2168
	}

2169 2170 2171 2172 2173
	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
2174
	put_pi_state(pi_state);
2175 2176

out_unlock:
D
Darren Hart 已提交
2177
	double_unlock_hb(hb1, hb2);
2178
	wake_up_q(&wake_q);
2179
	hb_waiters_dec(hb2);
2180
	return ret ? ret : task_count;
L
Linus Torvalds 已提交
2181 2182 2183
}

/* The key must be already stored in q->key. */
E
Eric Sesterhenn 已提交
2184
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2185
	__acquires(&hb->lock)
L
Linus Torvalds 已提交
2186
{
2187
	struct futex_hash_bucket *hb;
L
Linus Torvalds 已提交
2188

2189
	hb = hash_futex(&q->key);
2190 2191 2192 2193 2194 2195 2196 2197 2198

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
D
Davidlohr Bueso 已提交
2199
	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2200

2201
	q->lock_ptr = &hb->lock;
L
Linus Torvalds 已提交
2202

D
Davidlohr Bueso 已提交
2203
	spin_lock(&hb->lock);
2204
	return hb;
L
Linus Torvalds 已提交
2205 2206
}

2207
static inline void
J
Jason Low 已提交
2208
queue_unlock(struct futex_hash_bucket *hb)
2209
	__releases(&hb->lock)
2210 2211
{
	spin_unlock(&hb->lock);
2212
	hb_waiters_dec(hb);
2213 2214
}

2215
static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
L
Linus Torvalds 已提交
2216
{
P
Pierre Peiffer 已提交
2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
2231
	q->task = current;
2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
2250
	spin_unlock(&hb->lock);
L
Linus Torvalds 已提交
2251 2252
}

2253 2254 2255 2256 2257 2258 2259
/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
2260
 * Return:
2261 2262
 *  - 1 - if the futex_q was still queued (and we removed unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
L
Linus Torvalds 已提交
2263 2264 2265 2266
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
2267
	int ret = 0;
L
Linus Torvalds 已提交
2268 2269

	/* In the common case we don't take the spinlock, which is nice. */
2270
retry:
2271 2272 2273 2274 2275 2276
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
2277
	if (lock_ptr != NULL) {
L
Linus Torvalds 已提交
2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
2296
		__unqueue_futex(q);
2297 2298 2299

		BUG_ON(q->pi_state);

L
Linus Torvalds 已提交
2300 2301 2302 2303 2304 2305 2306
		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

2307 2308
/*
 * PI futexes can not be requeued and must remove themself from the
P
Pierre Peiffer 已提交
2309 2310
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
2311
 */
P
Pierre Peiffer 已提交
2312
static void unqueue_me_pi(struct futex_q *q)
2313
	__releases(q->lock_ptr)
2314
{
2315
	__unqueue_futex(q);
2316 2317

	BUG_ON(!q->pi_state);
2318
	put_pi_state(q->pi_state);
2319 2320
	q->pi_state = NULL;

P
Pierre Peiffer 已提交
2321
	spin_unlock(q->lock_ptr);
2322 2323
}

2324
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2325
				struct task_struct *argowner)
P
Pierre Peiffer 已提交
2326 2327
{
	struct futex_pi_state *pi_state = q->pi_state;
2328
	u32 uval, curval, newval;
2329 2330
	struct task_struct *oldowner, *newowner;
	u32 newtid;
2331
	int ret, err = 0;
P
Pierre Peiffer 已提交
2332

2333 2334
	lockdep_assert_held(q->lock_ptr);

P
Peter Zijlstra 已提交
2335 2336 2337
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;
2338 2339

	/*
2340
	 * We are here because either:
2341
	 *
2342 2343 2344 2345 2346 2347 2348 2349 2350
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
2351
	 * This must be atomic as we have to preserve the owner died bit here.
2352
	 *
D
Darren Hart 已提交
2353 2354 2355
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
2356
	 *
P
Peter Zijlstra 已提交
2357 2358 2359 2360
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
2361 2362
	 */
retry:
2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock; there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
P
Peter Zijlstra 已提交
2398 2399 2400
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;
2401

2402 2403 2404
	err = get_futex_value_locked(&uval, uaddr);
	if (err)
		goto handle_err;
2405

2406
	for (;;) {
2407 2408
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

2409 2410 2411 2412
		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
		if (err)
			goto handle_err;

2413 2414 2415 2416 2417 2418 2419 2420 2421
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
P
Pierre Peiffer 已提交
2422
	if (pi_state->owner != NULL) {
P
Peter Zijlstra 已提交
2423
		raw_spin_lock(&pi_state->owner->pi_lock);
P
Pierre Peiffer 已提交
2424 2425
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
P
Peter Zijlstra 已提交
2426
		raw_spin_unlock(&pi_state->owner->pi_lock);
2427
	}
P
Pierre Peiffer 已提交
2428

2429
	pi_state->owner = newowner;
P
Pierre Peiffer 已提交
2430

P
Peter Zijlstra 已提交
2431
	raw_spin_lock(&newowner->pi_lock);
P
Pierre Peiffer 已提交
2432
	WARN_ON(!list_empty(&pi_state->list));
2433
	list_add(&pi_state->list, &newowner->pi_state_list);
P
Peter Zijlstra 已提交
2434 2435 2436
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

2437
	return 0;
P
Pierre Peiffer 已提交
2438 2439

	/*
2440 2441 2442 2443 2444 2445 2446
	 * In order to reschedule or handle a page fault, we need to drop the
	 * locks here. In the case of a fault, this gives the other task
	 * (either the highest priority waiter itself or the task which stole
	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
	 * are back from handling the fault we need to check the pi_state after
	 * reacquiring the locks and before trying to do another fixup. When
	 * the fixup has been done already we simply return.
P
Peter Zijlstra 已提交
2447 2448 2449 2450
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
P
Pierre Peiffer 已提交
2451
	 */
2452
handle_err:
P
Peter Zijlstra 已提交
2453
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2454
	spin_unlock(q->lock_ptr);
2455

2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470
	switch (err) {
	case -EFAULT:
		ret = fault_in_user_writeable(uaddr);
		break;

	case -EAGAIN:
		cond_resched();
		ret = 0;
		break;

	default:
		WARN_ON_ONCE(1);
		ret = err;
		break;
	}
2471

2472
	spin_lock(q->lock_ptr);
P
Peter Zijlstra 已提交
2473
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2474

2475 2476 2477
	/*
	 * Check if someone else fixed it for us:
	 */
P
Peter Zijlstra 已提交
2478 2479 2480 2481
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}
2482 2483

	if (ret)
P
Peter Zijlstra 已提交
2484
		goto out_unlock;
2485 2486

	goto retry;
P
Peter Zijlstra 已提交
2487 2488 2489 2490

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
P
Pierre Peiffer 已提交
2491 2492
}

N
Nick Piggin 已提交
2493
static long futex_wait_restart(struct restart_block *restart);
T
Thomas Gleixner 已提交
2494

2495 2496 2497 2498 2499 2500 2501 2502 2503 2504
/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
2505
 * Return:
2506 2507 2508
 *  -  1 - success, lock taken;
 *  -  0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
2509
 */
2510
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2511 2512 2513 2514 2515 2516 2517
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
2518
		 *
2519 2520 2521
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
2522 2523
		 */
		if (q->pi_state->owner != current)
2524
			ret = fixup_pi_state_owner(uaddr, q, current);
A
André Almeida 已提交
2525
		return ret ? ret : locked;
2526 2527
	}

2528 2529 2530 2531 2532 2533 2534 2535 2536 2537
	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
A
André Almeida 已提交
2538
		return ret;
2539 2540
	}

2541 2542
	/*
	 * Paranoia check. If we did not take the lock, then we should not be
2543
	 * the owner of the rt_mutex.
2544
	 */
2545
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
2546 2547 2548 2549
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
2550
	}
2551

A
André Almeida 已提交
2552
	return ret;
2553 2554
}

2555 2556 2557 2558 2559 2560 2561
/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
T
Thomas Gleixner 已提交
2562
				struct hrtimer_sleeper *timeout)
2563
{
2564 2565
	/*
	 * The task state is guaranteed to be set before another task can
2566
	 * wake it. set_current_state() is implemented using smp_store_mb() and
2567 2568 2569
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
T
Thomas Gleixner 已提交
2570
	set_current_state(TASK_INTERRUPTIBLE);
2571
	queue_me(q, hb);
2572 2573

	/* Arm the timer */
2574
	if (timeout)
2575
		hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2576 2577

	/*
2578 2579
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
2580 2581 2582 2583 2584 2585 2586 2587
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
C
Colin Cross 已提交
2588
			freezable_schedule();
2589 2590 2591 2592
	}
	__set_current_state(TASK_RUNNING);
}

2593 2594 2595 2596
/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
2597
 * @flags:	futex flags (FLAGS_SHARED, etc.)
2598 2599 2600 2601 2602 2603 2604 2605
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
2606
 * Return:
2607 2608
 *  -  0 - uaddr contains val and hb has been locked;
 *  - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2609
 */
2610
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2611
			   struct futex_q *q, struct futex_hash_bucket **hb)
L
Linus Torvalds 已提交
2612
{
2613 2614
	u32 uval;
	int ret;
L
Linus Torvalds 已提交
2615 2616

	/*
D
Darren Hart 已提交
2617
	 * Access the page AFTER the hash-bucket is locked.
L
Linus Torvalds 已提交
2618 2619 2620 2621 2622 2623 2624
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
2625 2626
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
L
Linus Torvalds 已提交
2627 2628
	 * cond(var) false, which would violate the guarantee.
	 *
2629 2630 2631 2632
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
L
Linus Torvalds 已提交
2633
	 */
2634
retry:
2635
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2636
	if (unlikely(ret != 0))
2637
		return ret;
2638 2639 2640 2641

retry_private:
	*hb = queue_lock(q);

2642
	ret = get_futex_value_locked(&uval, uaddr);
L
Linus Torvalds 已提交
2643

2644
	if (ret) {
J
Jason Low 已提交
2645
		queue_unlock(*hb);
L
Linus Torvalds 已提交
2646

2647
		ret = get_user(uval, uaddr);
D
Darren Hart 已提交
2648
		if (ret)
A
André Almeida 已提交
2649
			return ret;
L
Linus Torvalds 已提交
2650

2651
		if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
2652 2653 2654
			goto retry_private;

		goto retry;
L
Linus Torvalds 已提交
2655
	}
2656

2657
	if (uval != val) {
J
Jason Low 已提交
2658
		queue_unlock(*hb);
2659
		ret = -EWOULDBLOCK;
P
Peter Zijlstra 已提交
2660
	}
L
Linus Torvalds 已提交
2661

2662 2663 2664
	return ret;
}

2665 2666
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
2667
{
2668
	struct hrtimer_sleeper timeout, *to;
2669 2670
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
2671
	struct futex_q q = futex_q_init;
2672 2673 2674 2675 2676 2677
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

2678 2679
	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
T
Thomas Gleixner 已提交
2680
retry:
2681 2682 2683 2684
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
2685
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2686 2687 2688
	if (ret)
		goto out;

2689
	/* queue_me and wait for wakeup, timeout, or a signal. */
T
Thomas Gleixner 已提交
2690
	futex_wait_queue_me(hb, &q, to);
L
Linus Torvalds 已提交
2691 2692

	/* If we were woken (and unqueued), we succeeded, whatever. */
P
Peter Zijlstra 已提交
2693
	ret = 0;
2694
	/* unqueue_me() drops q.key ref */
L
Linus Torvalds 已提交
2695
	if (!unqueue_me(&q))
2696
		goto out;
P
Peter Zijlstra 已提交
2697
	ret = -ETIMEDOUT;
2698
	if (to && !to->task)
2699
		goto out;
N
Nick Piggin 已提交
2700

2701
	/*
T
Thomas Gleixner 已提交
2702 2703
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
2704
	 */
2705
	if (!signal_pending(current))
T
Thomas Gleixner 已提交
2706 2707
		goto retry;

P
Peter Zijlstra 已提交
2708
	ret = -ERESTARTSYS;
2709
	if (!abs_time)
2710
		goto out;
L
Linus Torvalds 已提交
2711

2712
	restart = &current->restart_block;
P
Peter Zijlstra 已提交
2713
	restart->fn = futex_wait_restart;
2714
	restart->futex.uaddr = uaddr;
P
Peter Zijlstra 已提交
2715
	restart->futex.val = val;
T
Thomas Gleixner 已提交
2716
	restart->futex.time = *abs_time;
P
Peter Zijlstra 已提交
2717
	restart->futex.bitset = bitset;
2718
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2719

P
Peter Zijlstra 已提交
2720 2721
	ret = -ERESTART_RESTARTBLOCK;

2722
out:
2723 2724 2725 2726
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
2727 2728 2729
	return ret;
}

N
Nick Piggin 已提交
2730 2731 2732

static long futex_wait_restart(struct restart_block *restart)
{
2733
	u32 __user *uaddr = restart->futex.uaddr;
2734
	ktime_t t, *tp = NULL;
N
Nick Piggin 已提交
2735

2736
	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
T
Thomas Gleixner 已提交
2737
		t = restart->futex.time;
2738 2739
		tp = &t;
	}
N
Nick Piggin 已提交
2740
	restart->fn = do_no_restart_syscall;
2741 2742 2743

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
N
Nick Piggin 已提交
2744 2745 2746
}


2747 2748 2749
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
2750 2751 2752 2753 2754
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.).
 *
 * Also serves as futex trylock_pi()'ing, and due semantics.
2755
 */
2756
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2757
			 ktime_t *time, int trylock)
2758
{
2759
	struct hrtimer_sleeper timeout, *to;
2760
	struct futex_pi_state *pi_state = NULL;
T
Thomas Gleixner 已提交
2761
	struct task_struct *exiting = NULL;
2762
	struct rt_mutex_waiter rt_waiter;
2763
	struct futex_hash_bucket *hb;
2764
	struct futex_q q = futex_q_init;
2765
	int res, ret;
2766

2767 2768 2769
	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

2770 2771 2772
	if (refill_pi_state_cache())
		return -ENOMEM;

2773
	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2774

2775
retry:
2776
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2777
	if (unlikely(ret != 0))
2778
		goto out;
2779

D
Darren Hart 已提交
2780
retry_private:
E
Eric Sesterhenn 已提交
2781
	hb = queue_lock(&q);
2782

T
Thomas Gleixner 已提交
2783 2784
	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
				   &exiting, 0);
2785
	if (unlikely(ret)) {
2786 2787 2788 2789
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
2790
		switch (ret) {
2791 2792 2793 2794 2795 2796
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
2797
		case -EBUSY:
2798 2799
		case -EAGAIN:
			/*
2800
			 * Two reasons for this:
2801
			 * - EBUSY: Task is exiting and we just wait for the
2802
			 *   exit to complete.
2803
			 * - EAGAIN: The user space value changed.
2804
			 */
J
Jason Low 已提交
2805
			queue_unlock(hb);
T
Thomas Gleixner 已提交
2806 2807 2808 2809 2810 2811
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
2812 2813 2814
			cond_resched();
			goto retry;
		default:
2815
			goto out_unlock_put_key;
2816 2817 2818
		}
	}

2819 2820
	WARN_ON(!q.pi_state);

2821 2822 2823
	/*
	 * Only actually queue now that the atomic ops are done:
	 */
2824
	__queue_me(&q, hb);
2825

2826
	if (trylock) {
2827
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2828 2829
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
2830
		goto no_block;
2831 2832
	}

2833 2834
	rt_mutex_init_waiter(&rt_waiter);

2835
	/*
2836 2837 2838 2839 2840 2841 2842
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even through we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
2843 2844 2845 2846
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
2847
	 */
2848 2849
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
2850 2851 2852 2853 2854
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
2855 2856 2857
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

2858 2859 2860
	if (ret) {
		if (ret == 1)
			ret = 0;
2861
		goto cleanup;
2862 2863 2864
	}

	if (unlikely(to))
2865
		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
2866 2867 2868

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

2869
cleanup:
2870
	spin_lock(q.lock_ptr);
2871
	/*
2872
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2873
	 * first acquire the hb->lock before removing the lock from the
2874 2875
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
	 * lists consistent.
2876 2877 2878
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
2879 2880 2881 2882 2883
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
2884 2885 2886 2887
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
2888
	res = fixup_owner(uaddr, &q, !ret);
2889 2890 2891 2892 2893 2894
	/*
	 * If fixup_owner() returned an error, proprogate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;
2895

2896
	/*
2897 2898
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
2899
	 */
2900 2901 2902 2903
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}
2904

2905 2906
	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);
2907

2908 2909 2910 2911 2912
	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

A
André Almeida 已提交
2913
	goto out;
2914

2915
out_unlock_put_key:
J
Jason Low 已提交
2916
	queue_unlock(hb);
2917

2918
out:
2919 2920
	if (to) {
		hrtimer_cancel(&to->timer);
2921
		destroy_hrtimer_on_stack(&to->timer);
2922
	}
2923
	return ret != -EINTR ? ret : -ERESTARTNOINTR;
2924

2925
uaddr_faulted:
J
Jason Low 已提交
2926
	queue_unlock(hb);
2927

2928
	ret = fault_in_user_writeable(uaddr);
D
Darren Hart 已提交
2929
	if (ret)
A
André Almeida 已提交
2930
		goto out;
2931

2932
	if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
2933 2934 2935
		goto retry_private;

	goto retry;
2936 2937 2938 2939 2940 2941 2942
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
2943
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2944
{
2945
	u32 curval, uval, vpid = task_pid_vnr(current);
2946
	union futex_key key = FUTEX_KEY_INIT;
2947
	struct futex_hash_bucket *hb;
2948
	struct futex_q *top_waiter;
D
Darren Hart 已提交
2949
	int ret;
2950

2951 2952 2953
	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

2954 2955 2956 2957 2958 2959
retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
2960
	if ((uval & FUTEX_TID_MASK) != vpid)
2961 2962
		return -EPERM;

2963
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
2964 2965
	if (ret)
		return ret;
2966 2967 2968 2969 2970

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
2971 2972 2973
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
2974
	 */
2975 2976
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

2990
		get_pi_state(pi_state);
2991
		/*
2992 2993 2994 2995
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
2996 2997 2998 2999
		 *
		 * In particular; this forces __rt_mutex_start_proxy() to
		 * complete such that we're guaranteed to observe the
		 * rt_waiter. Also see the WARN in wake_futex_pi().
3000
		 */
3001
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3002 3003
		spin_unlock(&hb->lock);

3004
		/* drops pi_state->pi_mutex.wait_lock */
3005 3006 3007 3008 3009 3010
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
3011 3012 3013
		 */
		if (!ret)
			goto out_putkey;
3014
		/*
3015 3016
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
3017 3018 3019
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
3020 3021 3022 3023
		/*
		 * A unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
3024 3025
		if (ret == -EAGAIN)
			goto pi_retry;
3026 3027 3028 3029
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
3030
		goto out_putkey;
3031
	}
3032

3033
	/*
3034 3035 3036 3037 3038
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit not the OWNER_DIED one. We are the
	 * owner.
3039
	 */
3040
	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3041
		spin_unlock(&hb->lock);
3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052
		switch (ret) {
		case -EFAULT:
			goto pi_faulted;

		case -EAGAIN:
			goto pi_retry;

		default:
			WARN_ON_ONCE(1);
			goto out_putkey;
		}
3053
	}
3054

3055 3056 3057 3058 3059
	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

3060 3061
out_unlock:
	spin_unlock(&hb->lock);
3062
out_putkey:
3063 3064
	return ret;

3065 3066 3067 3068
pi_retry:
	cond_resched();
	goto retry;

3069 3070
pi_faulted:

3071
	ret = fault_in_user_writeable(uaddr);
3072
	if (!ret)
3073 3074
		goto retry;

L
Linus Torvalds 已提交
3075 3076 3077
	return ret;
}

3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was original enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
3090
 * Return:
3091 3092
 *  -  0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
3114
		plist_del(&q->list, &hb->chain);
3115
		hb_waiters_dec(hb);
3116

T
Thomas Gleixner 已提交
3117
		/* Handle spurious wakeups gracefully */
3118
		ret = -EWOULDBLOCK;
3119 3120
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
T
Thomas Gleixner 已提交
3121
		else if (signal_pending(current))
3122
			ret = -ERESTARTNOINTR;
3123 3124 3125 3126 3127 3128
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3129
 * @uaddr:	the futex we initially wait on (non-pi)
3130
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3131
 *		the same type, no requeueing from private to shared, etc.
3132 3133
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
3134
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
3135 3136 3137
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3138 3139 3140 3141 3142
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
3143 3144
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3145
 * via the following--
3146
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3147 3148 3149
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
3150
 *
3151
 * If 3, cleanup and return -ERESTARTNOINTR.
3152 3153 3154 3155 3156 3157 3158
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
3159
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3160 3161 3162
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
3163
 * Return:
3164 3165
 *  -  0 - On success;
 *  - <0 - On error
3166
 */
3167
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3168
				 u32 val, ktime_t *abs_time, u32 bitset,
3169
				 u32 __user *uaddr2)
3170
{
3171
	struct hrtimer_sleeper timeout, *to;
3172
	struct futex_pi_state *pi_state = NULL;
3173 3174
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
3175 3176
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
3177 3178
	int res, ret;

3179 3180 3181
	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

3182 3183 3184
	if (uaddr == uaddr2)
		return -EINVAL;

3185 3186 3187
	if (!bitset)
		return -EINVAL;

3188 3189
	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
3190 3191 3192 3193 3194

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
3195
	rt_mutex_init_waiter(&rt_waiter);
3196

3197
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3198 3199 3200
	if (unlikely(ret != 0))
		goto out;

3201 3202 3203 3204
	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

3205 3206 3207 3208
	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
3209
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
T
Thomas Gleixner 已提交
3210
	if (ret)
A
André Almeida 已提交
3211
		goto out;
3212

3213 3214 3215 3216 3217
	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
3218
		queue_unlock(hb);
3219
		ret = -EINVAL;
A
André Almeida 已提交
3220
		goto out;
3221 3222
	}

3223
	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
T
Thomas Gleixner 已提交
3224
	futex_wait_queue_me(hb, &q, to);
3225 3226 3227 3228 3229

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
A
André Almeida 已提交
3230
		goto out;
3231 3232 3233 3234 3235

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
3236 3237 3238
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
3239 3240 3241 3242 3243 3244 3245 3246 3247 3248
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
3249
			ret = fixup_pi_state_owner(uaddr2, &q, current);
3250 3251 3252 3253
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
3254 3255 3256 3257
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
3258
			put_pi_state(q.pi_state);
3259 3260 3261
			spin_unlock(q.lock_ptr);
		}
	} else {
3262 3263
		struct rt_mutex *pi_mutex;

3264 3265 3266 3267 3268
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
3269
		WARN_ON(!q.pi_state);
3270
		pi_mutex = &q.pi_state->pi_mutex;
3271
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3272 3273

		spin_lock(q.lock_ptr);
3274 3275 3276 3277
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
3278 3279 3280 3281
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
3282
		res = fixup_owner(uaddr2, &q, !ret);
3283 3284
		/*
		 * If fixup_owner() returned an error, proprogate that.  If it
3285
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3286 3287 3288 3289
		 */
		if (res)
			ret = (res < 0) ? res : 0;

3290 3291 3292 3293 3294
		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
3295 3296 3297 3298
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}
3299

3300 3301 3302 3303
		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

3304 3305 3306 3307 3308
	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

3309
	if (ret == -EINTR) {
3310
		/*
3311 3312 3313 3314 3315
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
3316
		 */
3317
		ret = -EWOULDBLOCK;
3318 3319 3320 3321 3322 3323 3324 3325 3326 3327
	}

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

3328 3329 3330 3331 3332 3333 3334
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
3335
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3336 3337 3338 3339 3340 3341 3342 3343
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
3344 3345 3346
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
3347
 */
3348 3349
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
3350
{
3351 3352
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
3365 3366 3367 3368
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
3369
 */
3370 3371 3372
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
3373
{
A
Al Viro 已提交
3374
	struct robust_list_head __user *head;
3375
	unsigned long ret;
3376
	struct task_struct *p;
3377

3378 3379 3380
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

3381 3382 3383
	rcu_read_lock();

	ret = -ESRCH;
3384
	if (!pid)
3385
		p = current;
3386
	else {
3387
		p = find_task_by_vpid(pid);
3388 3389 3390 3391
		if (!p)
			goto err_unlock;
	}

3392
	ret = -EPERM;
3393
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3394 3395 3396 3397 3398
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

3399 3400 3401 3402 3403
	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
3404
	rcu_read_unlock();
3405 3406 3407 3408

	return ret;
}

Y
Yang Tao 已提交
3409 3410 3411 3412
/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

3413 3414 3415 3416
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
Y
Yang Tao 已提交
3417 3418
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
3419
{
3420
	u32 uval, nval, mval;
3421
	int err;
3422

3423 3424 3425 3426
	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

3427 3428
retry:
	if (get_user(uval, uaddr))
3429 3430
		return -1;

Y
Yang Tao 已提交
3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466
	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) User space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. The user space futex value is
	 * uncontended and the rest of the user space mutex state is
	 * consistent, so a woken waiter will just take over the
	 * uncontended futex. Setting the OWNER_DIED bit would create
	 * inconsistent state and malfunction of the user space owner died
	 * handling.
	 */
	if (pending_op && !pi && !uval) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
3494 3495 3496
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
3497 3498 3499

		case -EAGAIN:
			cond_resched();
3500
			goto retry;
3501

3502 3503 3504 3505
		default:
			WARN_ON_ONCE(1);
			return err;
		}
3506
	}
3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

3518 3519 3520
	return 0;
}

3521 3522 3523 3524
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
A
Al Viro 已提交
3525
				     struct robust_list __user * __user *head,
3526
				     unsigned int *pi)
3527 3528 3529
{
	unsigned long uentry;

A
Al Viro 已提交
3530
	if (get_user(uentry, (unsigned long __user *)head))
3531 3532
		return -EFAULT;

A
Al Viro 已提交
3533
	*entry = (void __user *)(uentry & ~1UL);
3534 3535 3536 3537 3538
	*pi = uentry & 1;

	return 0;
}

3539 3540 3541 3542 3543 3544
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
3545
static void exit_robust_list(struct task_struct *curr)
3546 3547
{
	struct robust_list_head __user *head = curr->robust_list;
M
Martin Schwidefsky 已提交
3548
	struct robust_list __user *entry, *next_entry, *pending;
3549
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3550
	unsigned int next_pi;
3551
	unsigned long futex_offset;
M
Martin Schwidefsky 已提交
3552
	int rc;
3553

3554 3555 3556
	if (!futex_cmpxchg_enabled)
		return;

3557 3558 3559 3560
	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
3561
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
3562 3563 3564 3565 3566 3567 3568 3569 3570 3571
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
3572
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3573
		return;
3574

M
Martin Schwidefsky 已提交
3575
	next_entry = NULL;	/* avoid warning with gcc */
3576
	while (entry != &head->list) {
M
Martin Schwidefsky 已提交
3577 3578 3579 3580 3581
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3582 3583
		/*
		 * A pending lock might already be on the list, so
3584
		 * don't process it twice:
3585
		 */
Y
Yang Tao 已提交
3586
		if (entry != pending) {
A
Al Viro 已提交
3587
			if (handle_futex_death((void __user *)entry + futex_offset,
Y
Yang Tao 已提交
3588
						curr, pi, HANDLE_DEATH_LIST))
3589
				return;
Y
Yang Tao 已提交
3590
		}
M
Martin Schwidefsky 已提交
3591
		if (rc)
3592
			return;
M
Martin Schwidefsky 已提交
3593 3594
		entry = next_entry;
		pi = next_pi;
3595 3596 3597 3598 3599 3600 3601 3602
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
M
Martin Schwidefsky 已提交
3603

Y
Yang Tao 已提交
3604
	if (pending) {
M
Martin Schwidefsky 已提交
3605
		handle_futex_death((void __user *)pending + futex_offset,
Y
Yang Tao 已提交
3606 3607
				   curr, pip, HANDLE_DEATH_PENDING);
	}
3608 3609
}

3610
static void futex_cleanup(struct task_struct *tsk)
3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646
/**
 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in do_exit().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
3647 3648 3649
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
3650 3651 3652
	tsk->futex_state = FUTEX_STATE_DEAD;
}

3653
static void futex_cleanup_begin(struct task_struct *tsk)
3654
{
3655 3656 3657 3658 3659 3660 3661 3662
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

3663
	/*
3664 3665 3666 3667 3668 3669 3670 3671 3672
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
3673 3674
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
3675
	tsk->futex_state = FUTEX_STATE_EXITING;
3676
	raw_spin_unlock_irq(&tsk->pi_lock);
3677
}
3678

3679 3680 3681 3682 3683 3684 3685
static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
3686 3687 3688 3689 3690
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
3691
}
3692

3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715
void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent futher damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection which is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3716 3717
}

3718
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3719
		u32 __user *uaddr2, u32 val2, u32 val3)
L
Linus Torvalds 已提交
3720
{
T
Thomas Gleixner 已提交
3721
	int cmd = op & FUTEX_CMD_MASK;
3722
	unsigned int flags = 0;
E
Eric Dumazet 已提交
3723 3724

	if (!(op & FUTEX_PRIVATE_FLAG))
3725
		flags |= FLAGS_SHARED;
L
Linus Torvalds 已提交
3726

3727 3728
	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
3729 3730
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
		    cmd != FUTEX_WAIT_REQUEUE_PI)
3731 3732
			return -ENOSYS;
	}
L
Linus Torvalds 已提交
3733

3734 3735 3736 3737 3738 3739 3740 3741 3742 3743
	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

E
Eric Dumazet 已提交
3744
	switch (cmd) {
L
Linus Torvalds 已提交
3745
	case FUTEX_WAIT:
3746
		val3 = FUTEX_BITSET_MATCH_ANY;
3747
		fallthrough;
3748
	case FUTEX_WAIT_BITSET:
T
Thomas Gleixner 已提交
3749
		return futex_wait(uaddr, flags, val, timeout, val3);
L
Linus Torvalds 已提交
3750
	case FUTEX_WAKE:
3751
		val3 = FUTEX_BITSET_MATCH_ANY;
3752
		fallthrough;
3753
	case FUTEX_WAKE_BITSET:
T
Thomas Gleixner 已提交
3754
		return futex_wake(uaddr, flags, val, val3);
L
Linus Torvalds 已提交
3755
	case FUTEX_REQUEUE:
T
Thomas Gleixner 已提交
3756
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
L
Linus Torvalds 已提交
3757
	case FUTEX_CMP_REQUEUE:
T
Thomas Gleixner 已提交
3758
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3759
	case FUTEX_WAKE_OP:
T
Thomas Gleixner 已提交
3760
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3761
	case FUTEX_LOCK_PI:
3762
		return futex_lock_pi(uaddr, flags, timeout, 0);
3763
	case FUTEX_UNLOCK_PI:
T
Thomas Gleixner 已提交
3764
		return futex_unlock_pi(uaddr, flags);
3765
	case FUTEX_TRYLOCK_PI:
3766
		return futex_lock_pi(uaddr, flags, NULL, 1);
3767 3768
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
T
Thomas Gleixner 已提交
3769 3770
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
3771
	case FUTEX_CMP_REQUEUE_PI:
T
Thomas Gleixner 已提交
3772
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
L
Linus Torvalds 已提交
3773
	}
T
Thomas Gleixner 已提交
3774
	return -ENOSYS;
L
Linus Torvalds 已提交
3775 3776 3777
}


3778
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3779
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3780
		u32, val3)
L
Linus Torvalds 已提交
3781
{
3782
	struct timespec64 ts;
3783
	ktime_t t, *tp = NULL;
3784
	u32 val2 = 0;
E
Eric Dumazet 已提交
3785
	int cmd = op & FUTEX_CMD_MASK;
L
Linus Torvalds 已提交
3786

3787
	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3788 3789
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3790 3791
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
3792
		if (get_timespec64(&ts, utime))
L
Linus Torvalds 已提交
3793
			return -EFAULT;
3794
		if (!timespec64_valid(&ts))
3795
			return -EINVAL;
3796

3797
		t = timespec64_to_ktime(ts);
E
Eric Dumazet 已提交
3798
		if (cmd == FUTEX_WAIT)
3799
			t = ktime_add_safe(ktime_get(), t);
3800
		tp = &t;
L
Linus Torvalds 已提交
3801 3802
	}
	/*
3803
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3804
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
L
Linus Torvalds 已提交
3805
	 */
3806
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3807
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3808
		val2 = (u32) (unsigned long) utime;
L
Linus Torvalds 已提交
3809

3810
	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
L
Linus Torvalds 已提交
3811 3812
}

3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844
#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
3845
static void compat_exit_robust_list(struct task_struct *curr)
3846 3847 3848 3849
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3850
	unsigned int next_pi;
3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * dont process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

Y
Yang Tao 已提交
3892 3893
			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

Y
Yang Tao 已提交
3912
		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
3969
#endif /* CONFIG_COMPAT */
3970

3971
#ifdef CONFIG_COMPAT_32BIT_TIME
3972
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
3973 3974 3975
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
3976
	struct timespec64 ts;
3977 3978 3979 3980 3981 3982 3983
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3984
		if (get_old_timespec32(&ts, utime))
3985
			return -EFAULT;
3986
		if (!timespec64_valid(&ts))
3987 3988
			return -EINVAL;

3989
		t = timespec64_to_ktime(ts);
3990 3991 3992 3993 3994 3995 3996 3997 3998 3999
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
4000
#endif /* CONFIG_COMPAT_32BIT_TIME */
4001

4002
static void __init futex_detect_cmpxchg(void)
L
Linus Torvalds 已提交
4003
{
4004
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
4005
	u32 curval;
4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
4024
	unsigned int futex_shift;
4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
4036 4037 4038
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;
4039 4040

	futex_detect_cmpxchg();
4041

4042
	for (i = 0; i < futex_hashsize; i++) {
4043
		atomic_set(&futex_queues[i].waiters, 0);
4044
		plist_head_init(&futex_queues[i].chain);
T
Thomas Gleixner 已提交
4045 4046 4047
		spin_lock_init(&futex_queues[i].lock);
	}

L
Linus Torvalds 已提交
4048 4049
	return 0;
}
4050
core_initcall(futex_init);