/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wakeup scenarios where no tasks are blocked on the futex, the
 * waker can avoid taking the hb spinlock and simply return. For this
 * optimization to work, ordering guarantees must exist so that a waiter
 * being added to the list is observed when the list is concurrently
 * checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   mb(); (A) <-- paired with -.
 *                              |
 *   lock(hash_bucket(futex));  |
 *                              |
 *   uval = *futex;             |
 *                              |        *futex = newval;
 *                              |        sys_futex(WAKE, futex);
 *                              |          futex_wake(futex);
 *                              |
 *                              `------->  mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers in
 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
 * futex type.
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when the wait call
 * may later return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued to another
 * address, we always increment the waiter count of the destination bucket
 * before acquiring the lock, and decrement it again after releasing it;
 * the code that actually moves the futex(es) between hash buckets
 * (requeue_futex) does the additional required waiter count housekeeping.
 * This is done in double_lock_hb() and double_unlock_hb(), respectively.
 */
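
/*
 * For orientation, the user space side of the wait/wake protocol described
 * above looks roughly like the following sketch. It is illustrative only
 * (not kernel code); error handling is elided and futex() stands for the
 * raw syscall, which has no glibc wrapper:
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long futex(uint32_t *uaddr, int op, uint32_t val)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *	}
 *
 *	void wait_while(uint32_t *f, uint32_t val)
 *	{
 *		while (*f == val)			// still the blocked value
 *			futex(f, FUTEX_WAIT, val);	// sleeps only if *f == val
 *	}
 *
 *	void wake_one(uint32_t *f, uint32_t newval)
 *	{
 *		*f = newval;				// publish the transition
 *		futex(f, FUTEX_WAKE, 1);		// wake at most one waiter
 *	}
 */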

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

static unsigned long __read_mostly futex_hashsize;

static struct futex_hash_bucket *futex_queues;

static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as full barrier (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
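	/* On UP there is no waiter accounting; assume waiters may be pending. */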
	return 1;
#endif
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
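
	/*
	 * futex_hashsize is sized to a power of two at boot (see futex_init),
	 * so the mask below is a cheap modulo.
	 */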
	return &futex_queues[hash & (futex_hashsize - 1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies MB (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies MB (B) */
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *page_head;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies MB (B) */
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page_head = page;
	if (unlikely(PageTail(page))) {
		put_page(page);
		/* serialize against __split_huge_page_splitting() */
		local_irq_disable();
		if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
			page_head = compound_head(page);
			/*
			 * page_head is a valid pointer but we must pin
			 * it before taking the PG_lock and/or
			 * PG_compound_lock. The moment we re-enable
			 * irqs __split_huge_page_splitting() can
			 * return and the head page can be freed from
			 * under us. We can't take the PG_lock and/or
			 * PG_compound_lock on a page that could be
			 * freed from under us.
			 */
			if (page != page_head) {
				get_page(page_head);
				put_page(page);
			}
			local_irq_enable();
		} else {
			local_irq_enable();
			goto again;
		}
	}
#else
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);
		put_page(page);
	}
#endif

	lock_page(page_head);

	/*
	 * If page_head->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page_head->mapping.
	 */
	if (!page_head->mapping) {
		int shmem_swizzled = PageSwapCache(page_head);
		unlock_page(page_head);
		put_page(page_head);
		if (shmem_swizzled)
			goto again;
		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object, not the particular process.
	 */
	if (PageAnon(page_head)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page_head->mapping->host;
		key->shared.pgoff = basepage_index(page);
	}

	get_futex_key_refs(key); /* implies MB (B) */

out:
	unlock_page(page_head);
	put_page(page_head);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section, so we might as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non-PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 */
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Sanity check the waiter before increasing
			 * the refcount and attaching to it.
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and
			 * PI futexes [3]
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * Handle the owner died case:
			 */
			if (uval & FUTEX_OWNER_DIED) {
				/*
				 * exit_pi_state_list sets owner to NULL and
				 * wakes the topmost waiter. The task which
				 * acquires the pi_state->rt_mutex will fixup
				 * owner.
				 */
				if (!pi_state->owner) {
					/*
					 * No pi state owner, but the user
					 * space TID is not 0. Inconsistent
					 * state. [5]
					 */
					if (pid)
						return -EINVAL;
					/*
					 * Take a ref on the state and
					 * return. [4]
					 */
					goto out_state;
				}

				/*
				 * If TID is 0, then either the dying owner
				 * has not yet executed exit_pi_state_list()
				 * or some waiter acquired the rtmutex in the
				 * pi state, but did not yet fixup the TID in
				 * user space.
				 *
				 * Take a ref on the state and return. [6]
				 */
				if (!pid)
					goto out_state;
			} else {
				/*
				 * If the owner died bit is not set,
				 * then the pi_state must have an
				 * owner. [7]
				 */
				if (!pi_state->owner)
					return -EINVAL;
			}

			/*
			 * Bail out if user space manipulated the
			 * futex value. If pi state exists then the
			 * owner TID must be the same as the user
			 * space TID. [9/10]
			 */
			if (pid != task_pid_vnr(pi_state->owner))
				return -EINVAL;

		out_state:
			atomic_inc(&pi_state->refcount);
			*ps = pi_state;
			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (!p->mm) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, force_take = 0;
	u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = vpid;
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock, but we do not trust user space at all.
	 */
	if (unlikely(!curval)) {
		/*
		 * We verify whether there is kernel state for this
		 * futex. If not, we can safely assume, that the 0 ->
		 * TID transition is correct. If state exists, we do
		 * not bother to fixup the user space state as it was
		 * corrupted already.
		 */
		return futex_top_waiter(hb, key) ? -EINVAL : 1;
	}

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * Should we force take the futex? See below.
	 */
	if (unlikely(force_take)) {
		/*
		 * Keep the OWNER_DIED and the WAITERS bit and set the
		 * new TID value.
		 */
		newval = (curval & ~FUTEX_TID_MASK) | vpid;
		force_take = 0;
		lock_taken = 1;
	}

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to forced take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * We failed to find an owner for this
			 * futex. So we have no pi_state to block
			 * on. This can happen in two cases:
			 *
			 * 1) The owner died
			 * 2) A stale FUTEX_WAITERS bit
			 *
			 * Re-read the futex value.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * If the owner died or we have a stale
			 * WAITERS bit the owner TID in the user space
			 * futex is 0.
			 */
			if (!(curval & FUTEX_TID_MASK)) {
				force_take = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}
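
/*
 * For orientation: the user space fast path that pairs with the 0 -> TID
 * cmpxchg above looks roughly like this (an illustrative sketch, not the
 * actual glibc implementation):
 *
 *	pid_t tid = gettid();
 *	if (atomic_cmpxchg(futex, 0, tid) == 0)
 *		return 0;			// uncontended, lock acquired
 *	futex(uaddr, FUTEX_LOCK_PI, ...);	// contended, ends up in
 *						// futex_lock_pi_atomic()
 */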

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We cleanup the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
		ret = -EFAULT;
	else if (curval != uval)
		ret = -EINVAL;
	if (ret) {
		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 uninitialized_var(oldval);

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit does not need to be preserved here. We are the owner:
	 */
	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
		return -EFAULT;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}
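
/*
 * The encoded @op packs an atomic read-modify-write of *uaddr2 together with
 * a wakeup condition on its old value. As an illustrative sketch using the
 * uapi FUTEX_OP() helper from <linux/futex.h>: XOR *uaddr2 with 1, then wake
 * up to nr_wake2 waiters on uaddr2 only if the old value was 0 (nr_wake2
 * travels in the timeout slot of the raw syscall):
 *
 *	int op = FUTEX_OP(FUTEX_OP_XOR, 1, FUTEX_OP_CMP_EQ, 0);
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake,
 *		(void *)(long)nr_wake2, uaddr2, op);
 */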

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		plist_add(&q->list, &hb2->chain);
		hb_waiters_inc(hb2);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1500
 * @uaddr1:	source futex user address
1501
 * @flags:	futex flags (FLAGS_SHARED, etc.)
1502 1503 1504 1505 1506
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1507
 *		pi futex (pi to pi requeue is not supported)
1508 1509 1510 1511
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
1512 1513
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
1514
 *  <0 - on error
L
Linus Torvalds 已提交
1515
 */
1516 1517 1518
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
L
Linus Torvalds 已提交
1519
{
1520
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1521 1522
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
1523
	struct futex_hash_bucket *hb1, *hb2;
L
Linus Torvalds 已提交
1524
	struct futex_q *this, *next;
1525 1526

	if (requeue_pi) {
1527 1528 1529 1530 1531 1532 1533
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}
L
Linus Torvalds 已提交
1553

1554
retry:
1555 1556 1557 1558 1559 1560 1561 1562 1563
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

1564
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
L
Linus Torvalds 已提交
1565 1566
	if (unlikely(ret != 0))
		goto out;
1567 1568
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
L
Linus Torvalds 已提交
1569
	if (unlikely(ret != 0))
1570
		goto out_put_key1;
L
Linus Torvalds 已提交
1571

1572 1573 1574 1575 1576 1577 1578 1579 1580
	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

1581 1582
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
L
Linus Torvalds 已提交
1583

D
Darren Hart 已提交
1584
retry_private:
1585
	hb_waiters_inc(hb2);
I
Ingo Molnar 已提交
1586
	double_lock_hb(hb1, hb2);
L
Linus Torvalds 已提交
1587

1588 1589
	if (likely(cmpval != NULL)) {
		u32 curval;
L
Linus Torvalds 已提交
1590

1591
		ret = get_futex_value_locked(&curval, uaddr1);
L
Linus Torvalds 已提交
1592 1593

		if (unlikely(ret)) {
D
Darren Hart 已提交
1594
			double_unlock_hb(hb1, hb2);
1595
			hb_waiters_dec(hb2);
L
Linus Torvalds 已提交
1596

1597
			ret = get_user(curval, uaddr1);
D
Darren Hart 已提交
1598 1599
			if (ret)
				goto out_put_keys;
L
Linus Torvalds 已提交
1600

1601
			if (!(flags & FLAGS_SHARED))
D
Darren Hart 已提交
1602
				goto retry_private;
L
Linus Torvalds 已提交
1603

1604 1605
			put_futex_key(&key2);
			put_futex_key(&key1);
D
Darren Hart 已提交
1606
			goto retry;
L
Linus Torvalds 已提交
1607
		}
1608
		if (curval != *cmpval) {
L
Linus Torvalds 已提交
1609 1610 1611 1612 1613
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

1614
	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1615 1616 1617 1618 1619 1620
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
1621
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1622
						 &key2, &pi_state, nr_requeue);
1623 1624 1625 1626 1627

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
1628 1629
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
1630
		 */
1631
		if (ret > 0) {
1632
			WARN_ON(pi_state);
1633
			drop_count++;
1634
			task_count++;
1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645
			/*
			 * If we acquired the lock, then the user
			 * space value of uaddr2 should be vpid. It
			 * cannot be changed by the top waiter as it
			 * is blocked on hb2 lock if it tries to do
			 * so. If something fiddled with it behind our
			 * back the pi state lookup might unearth
			 * it. So we rather use the known value than
			 * rereading and handing potential crap to
			 * lookup_pi_state.
			 */
1646
			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1647 1648 1649 1650 1651 1652 1653
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
1654
			hb_waiters_dec(hb2);
1655 1656
			put_futex_key(&key2);
			put_futex_key(&key1);
1657
			ret = fault_in_user_writeable(uaddr2);
1658 1659 1660 1661 1662 1663
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
1664
			hb_waiters_dec(hb2);
1665 1666
			put_futex_key(&key2);
			put_futex_key(&key1);
1667 1668 1669 1670 1671 1672 1673
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

J
Jason Low 已提交
1674
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1675 1676 1677 1678
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
L
Linus Torvalds 已提交
1679
			continue;
1680

1681 1682 1683
		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
1684 1685 1686
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
1687 1688
		 */
		if ((requeue_pi && !this->rt_waiter) ||
1689 1690
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
1691 1692 1693
			ret = -EINVAL;
			break;
		}
1694 1695 1696 1697 1698 1699 1700

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
L
Linus Torvalds 已提交
1701
			wake_futex(this);
1702 1703
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}
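
/*
 * Illustrative userspace counterpart of the requeue path above (a sketch,
 * not part of this file; the helper name is hypothetical). FUTEX_CMP_REQUEUE
 * wakes nr_wake waiters and moves up to nr_requeue more from uaddr1 to
 * uaddr2; nr_requeue travels in the timeout argument slot and val3 is the
 * expected value of *uaddr1 (see the decoding in do_futex() below):
 *
 *	static int wake_one_requeue_rest(int *uaddr1, int *uaddr2, int expected)
 *	{
 *		return syscall(SYS_futex, uaddr1, FUTEX_CMP_REQUEUE,
 *			       1, (void *)INT_MAX, uaddr2, expected);
 *	}
 *
 * A mismatch (*uaddr1 != expected) fails with -EAGAIN, mirroring the
 * get_futex_value_locked() check near the top of futex_requeue().
 */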

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock); /* implies MB (A) */
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}
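
/*
 * Typical caller pattern (a condensed illustrative sketch of what
 * futex_wait_setup() and futex_lock_pi() below actually do):
 *
 *	*hb = queue_lock(q);			// hb->lock held, waiter count bumped
 *	ret = get_futex_value_locked(&uval, uaddr);
 *	if (ret || uval != val)
 *		queue_unlock(*hb);		// error path undoes the count
 *	else
 *		queue_me(q, *hb);		// enqueue and drop hb->lock
 */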

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *   1 - if the futex_q was still queued (and we unqueued it);
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with the hash bucket lock held and mm->sem held for
 * non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to clean up
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  1 - success, lock taken;
 *  0 - success, lock not taken;
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late.
		 */
		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
		ret = fixup_pi_state_owner(uaddr, q, owner);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  0 - uaddr contains val and hb has been locked;
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
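
/*
 * Illustrative userspace counterpart of futex_wait()/futex_wake() (a
 * sketch, not part of this file), showing the waiter/waker pairing that
 * the ordering comment in futex_wait_setup() relies on:
 *
 *	// waiter: block only while *uaddr still holds 'val'
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
 *
 *	// waker: publish the new value, then wake one waiter
 *	*uaddr = newval;
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * The waiter fails with -EWOULDBLOCK when *uaddr != val, which is exactly
 * the check futex_wait_setup() performs under the hb lock.
 */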

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock) {
		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
	} else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	u32 uval, vpid = task_pid_vnr(current);
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up. We only try this if neither the waiters nor
	 * the owner died bit are set.
	 */
	if (!(uval & ~FUTEX_TID_MASK) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == vpid))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	ret = unlock_futex_pi(uaddr, uval);
	if (ret == -EFAULT)
		goto pi_faulted;

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

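/*
 * Userspace fast path for PI futexes (an illustrative sketch, not part of
 * this file). Only the contended transitions fall back into the kernel
 * through futex_lock_pi()/futex_unlock_pi() above; atomic_cmpxchg() below
 * stands in for the architecture's compare-and-exchange:
 *
 *	// lock: try the 0 -> TID transition in user space first
 *	if (atomic_cmpxchg(uaddr, 0, tid) != 0)
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 *	// unlock: try the TID -> 0 transition; a set FUTEX_WAITERS bit
 *	// makes this fail so the kernel can hand off to the top waiter
 *	if (atomic_cmpxchg(uaddr, tid, 0) != tid)
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */
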
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  0 = no early wakeup detected;
 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 * 		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  0 - On success;
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2574
				 u32 val, ktime_t *abs_time, u32 bitset,
2575
				 u32 __user *uaddr2)
2576 2577 2578 2579 2580
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
2581 2582
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
2583 2584
	int res, ret;

2585 2586 2587
	if (uaddr == uaddr2)
		return -EINVAL;

2588 2589 2590 2591 2592
	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
2593 2594 2595
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
2596 2597 2598 2599 2600 2601 2602 2603 2604 2605
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
2606 2607
	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
	RB_CLEAR_NODE(&rt_waiter.tree_entry);
2608 2609
	rt_waiter.task = NULL;

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

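/*
 * Illustrative pairing for the two requeue-PI ops (a sketch, not part of
 * this file): a condvar-style wait on a non-PI futex 'cond' whose waiters
 * are requeued onto a PI futex 'mutex'. nr_wake must be 1 for the PI
 * variant, and 'val' is the value of *cond the waker expects, mirroring
 * the checks in futex_requeue():
 *
 *	// waiter side (cf. pthread_cond_wait() on a PI mutex):
 *	syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, val, NULL, mutex, 0);
 *
 *	// waker side (cf. pthread_cond_broadcast()): wake one waiter and
 *	// requeue the rest onto the PI mutex
 *	syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)INT_MAX, mutex, val);
 */
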
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

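/*
 * Illustrative registration (a sketch, not part of this file; glibc does
 * this once per thread). The structures live in include/uapi/linux/futex.h;
 * futex_offset is the offset from a list entry to the futex word it guards.
 * 'struct my_lock' is a hypothetical userspace lock type:
 *
 *	static __thread struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty, circular
 *		.futex_offset	 = offsetof(struct my_lock, futex),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */
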
/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

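/*
 * Worked example (bit values from include/uapi/linux/futex.h): if the dead
 * owner's futex word was
 *
 *	uval = FUTEX_WAITERS | tid		(0x80000000 | tid)
 *
 * the cmpxchg above rewrites it to
 *
 *	mval = FUTEX_WAITERS | FUTEX_OWNER_DIED	(0x80000000 | 0x40000000)
 *
 * dropping the TID bits (FUTEX_TID_MASK == 0x3fffffff), so the next locker
 * sees an ownerless, owner-died futex and can run userspace recovery.
 */
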
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, val, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
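
/*
 * Worked example of the timeout conversion above (illustrative, not part
 * of this file): FUTEX_WAIT takes a relative timespec, converted to an
 * absolute deadline via ktime_add_safe(ktime_get(), t), so
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT, val, &ts, NULL, 0);
 *
 * blocks for at most one second, while FUTEX_WAIT_BITSET and the PI ops
 * treat the timespec as an absolute time on the selected clock.
 */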

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);