// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/refcount.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. It then decrements them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
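
/*
 * Illustrative userspace counterpart (a sketch, not kernel code): the
 * protocol above as driven through the futex(2) syscall. The variable
 * names are hypothetical:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int futex_var = 0;
 *
 *	// Waiter: sleep as long as futex_var still holds the value it read.
 *	int val = futex_var;
 *	syscall(SYS_futex, &futex_var, FUTEX_WAIT, val, NULL, NULL, 0);
 *
 *	// Waker: publish the new value, then wake one waiter.
 *	__atomic_store_n(&futex_var, 1, __ATOMIC_SEQ_CST);
 *	syscall(SYS_futex, &futex_var, FUTEX_WAKE, 1, NULL, NULL, 0);
 */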

#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);
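
/*
 * Usage sketch (assumption: the generic fault_attr boot-parameter format
 * from Documentation/fault-injection applies to this hook):
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *
 * e.g. booting with "fail_futex=1,100,0,-1" would make every eligible
 * futex fault-injection site fail until reconfigured through the debugfs
 * knobs created below under /sys/kernel/debug/fail_futex/.
 */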

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

#ifdef CONFIG_COMPAT
static void compat_exit_robust_list(struct task_struct *curr);
#else
static inline void compat_exit_robust_list(struct task_struct *curr) { }
#endif

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}
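
/*
 * Note: the "& (futex_hashsize - 1)" above only selects a valid bucket
 * because futex_hashsize is a power of two (it is rounded up at boot in
 * futex_init(), which lies outside this excerpt). E.g. with a hashsize
 * of 256, hash 0x1234abcd maps to bucket 0x1234abcd & 0xff = 0xcd.
 */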


/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
static inline struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
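
/*
 * Usage sketch (hedged; this mirrors how the wait paths in this file,
 * e.g. futex_wait(), drive the helper - those paths lie outside this
 * excerpt):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */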

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *              FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but lets be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!refcount_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * its possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret: owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
	 * caller that the alleged owner is busy.
	 */
	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
		return -EBUSY;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
	 *  tsk->futex_state =               } else {
	 *	FUTEX_STATE_DEAD;              if (tsk->futex_state !=
	 *					  FUTEX_STATE_DEAD)
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps,
			      struct task_struct **exiting)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state to figure out whether the
	 * task is exiting. To protect against the change of the task state
	 * in futex_exit_release(), we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
		/*
		 * The task is on the way out. When the futex state is
		 * FUTEX_STATE_DEAD, we know that the task has finished
		 * the cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		/*
		 * If the owner task is between FUTEX_STATE_EXITING and
		 * FUTEX_STATE_DEAD then store the task pointer and keep
		 * the reference on the task struct. The calling code will
		 * drop all locks, wait for the task to reach
		 * FUTEX_STATE_DEAD and then drop the refcount. This is
		 * required to prevent a live lock when the current task
		 * preempted the exiting task between the two states.
		 */
		if (ret == -EBUSY)
			*exiting = p;
		else
			put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps,
			   struct task_struct **exiting)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	int err;
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}
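
/*
 * For orientation, a sketch of the PI futex word layout these helpers
 * operate on (bit values from the futex uapi header):
 *
 *	FUTEX_WAITERS    0x80000000  - waiters exist in the kernel
 *	FUTEX_OWNER_DIED 0x40000000  - owner exited without unlocking
 *	FUTEX_TID_MASK   0x3fffffff  - TID of the current owner
 *
 * e.g. uval = 0x80000457 decodes as "owned by TID 1111, with waiters",
 * so an unlocking owner must go through the kernel instead of a plain
 * userspace cmpxchg to 0.
 */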

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock.
	 */
	wake_q_add_safe(wake_q, p);
}

/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

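/*
 * Layout of the encoded_op word decoded below; it mirrors the FUTEX_OP()
 * helper macro in the uapi <linux/futex.h> header:
 *
 *	bit  31    : FUTEX_OP_OPARG_SHIFT (treat oparg as a shift count)
 *	bits 28-30 : op     (SET, ADD, OR, ANDN, XOR)
 *	bits 24-27 : cmp    (EQ, NE, LT, LE, GT, GE)
 *	bits 12-23 : oparg  (sign-extended)
 *	bits  0-11 : cmparg (sign-extended)
 */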
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
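/*
 * Userspace sketch (not from this file, see futex(2)): wake one waiter on
 * uaddr1 and, if the old value of *uaddr2 was 0, atomically set it to 1
 * and wake one waiter there as well:
 *
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1, (void *)1, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0));
 */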
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		double_unlock_hb(hb1, hb2);

		if (!IS_ENABLED(CONFIG_MMU) ||
		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
			/*
			 * we don't get EFAULT from MMU faults if we don't have
			 * an MMU, but we might get them from range checking
			 */
			ret = op_ret;
			goto out_put_keys;
		}

		if (op_ret == -EFAULT) {
			ret = fault_in_user_writeable(uaddr2);
			if (ret)
				goto out_put_keys;
		}

		if (!(flags & FLAGS_SHARED)) {
			cond_resched();
			goto retry_private;
		}

		put_futex_key(&key2);
		put_futex_key(&key1);
		cond_resched();
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex(&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex(&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{
	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
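/*
 * Sketch of the classic user (not from this file, field names hypothetical):
 * glibc's old condvar implemented pthread_cond_broadcast() roughly as
 *
 *	futex(&cond->futex, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *	      &mutex->futex, cond->futex_val);
 *
 * i.e. wake one waiter and requeue the rest onto the mutex, so they wake
 * one at a time as the mutex is released instead of stampeding.
 */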
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		struct task_struct *exiting = NULL;

		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state,
						 &exiting, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
					      &pi_state, &exiting);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			get_pi_state(pi_state);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiters futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}

/* The key must already be stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
{
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, uninitialized_var(curval), newval;
	struct task_struct *oldowner, *newowner;
	u32 newtid;
	int ret, err = 0;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
	 */
retry:
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock; there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	err = get_futex_value_locked(&uval, uaddr);
	if (err)
		goto handle_err;

	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
		if (err)
			goto handle_err;

		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	return 0;

	/*
	 * In order to reschedule or handle a page fault, we need to drop the
	 * locks here. In the case of a fault, this gives the other task
	 * (either the highest priority waiter itself or the task which stole
	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
	 * are back from handling the fault we need to check the pi_state after
	 * reacquiring the locks and before trying to do another fixup. When
	 * the fixup has been done already we simply return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
	 */
handle_err:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	switch (err) {
	case -EFAULT:
		ret = fault_in_user_writeable(uaddr);
		break;

	case -EAGAIN:
		cond_resched();
		ret = 0;
		break;

	default:
		WARN_ON_ONCE(1);
		ret = err;
		break;
	}

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}

	if (ret)
		goto out_unlock;

	goto retry;

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  -  1 - success, lock taken;
 *  -  0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
	}

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  -  0 - uaddr contains val and hb has been locked;
 *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = *abs_time;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.).
 *
 * Also serves as futex trylock_pi()'ing, and due semantics.
 */
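/*
 * Userspace fast path sketch (see futex(2)): lock is
 * atomic_cmpxchg(uaddr, 0, gettid()); only when that fails does the
 * caller invoke FUTEX_LOCK_PI and land here. Unlock is the mirror
 * image, falling back to FUTEX_UNLOCK_PI once FUTEX_WAITERS is set.
 */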
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to;
	struct futex_pi_state *pi_state = NULL;
	struct task_struct *exiting = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (refill_pi_state_cache())
		return -ENOMEM;

	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
				   &exiting, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Task is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

	if (ret) {
		if (ret == 1)
			ret = 0;
		goto cleanup;
	}

	if (unlikely(to))
		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

cleanup:
	spin_lock(q.lock_ptr);
	/*
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
	 * first acquire the hb->lock before removing the lock from the
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
	 * lists consistent.
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;
	int ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

		get_pi_state(pi_state);
		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
		 *
		 * In particular; this forces __rt_mutex_start_proxy() to
		 * complete such that we're guaranteed to observe the
		 * rt_waiter. Also see the WARN in wake_futex_pi().
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN)
			goto pi_retry;
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_putkey;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
		spin_unlock(&hb->lock);
		switch (ret) {
		case -EFAULT:
			goto pi_faulted;

		case -EAGAIN:
			goto pi_retry;

		default:
			WARN_ON_ONCE(1);
			goto out_putkey;
		}
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_retry:
	put_futex_key(&key);
	cond_resched();
	goto retry;

pi_faulted:
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  -  0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following--
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  -  0 - On success;
 *  - <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3296
				 u32 val, ktime_t *abs_time, u32 bitset,
3297
				 u32 __user *uaddr2)
3298
{
3299
	struct hrtimer_sleeper timeout, *to;
3300
	struct futex_pi_state *pi_state = NULL;
3301 3302
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
3303 3304
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
3305 3306
	int res, ret;

3307 3308 3309
	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

3310 3311 3312
	if (uaddr == uaddr2)
		return -EINVAL;

3313 3314 3315
	if (!bitset)
		return -EINVAL;

3316 3317
	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
3318 3319 3320 3321 3322

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
3323
	rt_mutex_init_waiter(&rt_waiter);
3324

3325
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3326 3327 3328
	if (unlikely(ret != 0))
		goto out;

3329 3330 3331 3332
	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

3333 3334 3335 3336
	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
3337
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
T
Thomas Gleixner 已提交
3338 3339
	if (ret)
		goto out_key2;
3340

3341 3342 3343 3344 3345
	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
3346
		queue_unlock(hb);
3347 3348 3349 3350
		ret = -EINVAL;
		goto out_put_keys;
	}

3351
	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
T
Thomas Gleixner 已提交
3352
	futex_wait_queue_me(hb, &q, to);
3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
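
/*
 * Illustrative sketch (not kernel code): the condvar-style pairing this
 * function serves, seen from userspace. A waiter blocks on the non-PI
 * futex 'cond' and names the PI futex 'mutex' as the requeue target; a
 * signaler moves it over with FUTEX_CMP_REQUEUE_PI. The variables and
 * the zero nr_requeue are simplifying assumptions; real condvar
 * implementations carry more state and error handling.
 *
 *	// waiter: wait on cond, wake up owning the PI mutex
 *	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, cond_val,
 *		NULL, &mutex, 0);
 *
 *	// signaler: wake one waiter by requeueing it onto the PI mutex;
 *	// the 4th argument carries nr_requeue for this operation
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)0, &mutex, cond_val);
 */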

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
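
/*
 * Illustrative sketch (not kernel code): how a userspace lock library
 * might register the list that the exit-time walk below consumes. The
 * head layout is the robust_list_head ABI from <linux/futex.h>;
 * 'struct my_mutex' with an embedded 'list' node and a 'futex_word'
 * field is a hypothetical example type.
 *
 *	struct robust_list_head head;
 *
 *	head.list.next = &head.list;	// empty circular list
 *	head.futex_offset = offsetof(struct my_mutex, futex_word) -
 *			    offsetof(struct my_mutex, list);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */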

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, uninitialized_var(nval), mval;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) User space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. The user space futex value is
	 * uncontended and the rest of the user space mutex state is
	 * consistent, so a woken waiter will just take over the
	 * uncontended futex. Setting the OWNER_DIED bit would create
	 * inconsistent state and malfunction of the user space owner died
	 * handling.
	 */
	if (pending_op && !pi && !uval) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}
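
/*
 * Illustrative sketch (not kernel code): how a waiter woken by the
 * futex_wake() above might consume the OWNER_DIED notification. The
 * 'lock_word' and 'my_tid' names are hypothetical and real
 * implementations (e.g. glibc robust mutexes) are more involved.
 *
 *	u32 old = __atomic_load_n(&lock_word, __ATOMIC_RELAXED);
 *
 *	if ((old & FUTEX_OWNER_DIED) &&
 *	    __atomic_compare_exchange_n(&lock_word, &old,
 *					my_tid | FUTEX_OWNER_DIED, 0,
 *					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
 *		// Lock taken over from a dead owner. Report EOWNERDEAD so
 *		// the caller can make the protected state consistent.
 *	}
 */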

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in do_exit().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
	 * to exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
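
/*
 * Illustrative sketch (not kernel code): the classic userspace fast/slow
 * path this syscall backs. Only the contended case enters the kernel.
 * 'futex_word' and 'expected' are hypothetical, and EAGAIN/EINTR retry
 * handling is omitted.
 *
 *	// waiter: sleep while *futex_word still contains 'expected'
 *	syscall(SYS_futex, futex_word, FUTEX_WAIT_PRIVATE, expected,
 *		NULL, NULL, 0);
 *
 *	// waker: after changing *futex_word, wake one waiter
 *	syscall(SYS_futex, futex_word, FUTEX_WAKE_PRIVATE, 1,
 *		NULL, NULL, 0);
 */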

#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);