/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

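/*
 * Set during early boot when the architecture provides a working
 * futex_atomic_cmpxchg_inatomic(); the PI- and robust-futex paths
 * depend on this capability being present.
 */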
int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04
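/*
 * For example, a process-private FUTEX_WAIT with a timeout runs with
 * flags == FLAGS_HAS_TIMEOUT: FLAGS_SHARED is only set for non-private
 * ops and FLAGS_CLOCKRT only for the FUTEX_CLOCK_REALTIME variants.
 */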

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};
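/*
 * Callers such as futex_wait() copy this template into a stack-local
 * futex_q and then fill in the key (and, where needed, the bitset)
 * for their particular operation.
 */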

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
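
/*
 * With FUTEX_HASHBITS == 8 this table has 256 buckets, so unrelated
 * futexes can land in the same bucket; waiters are therefore always
 * matched by full key comparison (match_futex()), never by bucket
 * membership alone.
 */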

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
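
/*
 * get_futex_key_refs() and drop_futex_key_refs() must stay balanced:
 * every successful get_futex_key() pins the backing inode or mm, and
 * the matching put_futex_key() releases that same pin.
 */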

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *page_head;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (e.g. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page_head = page;
	if (unlikely(PageTail(page))) {
		put_page(page);
		/* serialize against __split_huge_page_splitting() */
		local_irq_disable();
		if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
			page_head = compound_head(page);
			/*
			 * page_head is valid pointer but we must pin
			 * it before taking the PG_lock and/or
			 * PG_compound_lock. The moment we re-enable
			 * irqs __split_huge_page_splitting() can
			 * return and the head page can be freed from
			 * under us. We can't take the PG_lock and/or
			 * PG_compound_lock on a page that could be
			 * freed from under us.
			 */
			if (page != page_head) {
				get_page(page_head);
				put_page(page);
			}
			local_irq_enable();
		} else {
			local_irq_enable();
			goto again;
		}
	}
#else
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);
		put_page(page);
	}
#endif

	lock_page(page_head);

	/*
	 * If page_head->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page_head->mapping.
	 */
	if (!page_head->mapping) {
		int shmem_swizzled = PageSwapCache(page_head);
		unlock_page(page_head);
		put_page(page_head);
		if (shmem_swizzled)
			goto again;
		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page_head)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page_head->mapping->host;
		key->shared.pgoff = basepage_index(page);
	}

	get_futex_key_refs(key);

out:
	unlock_page(page_head);
	put_page(page_head);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}
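
/*
 * Since hb->chain is kept sorted by priority (see queue_me()), the
 * first key match above is also the highest-priority waiter.
 */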

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
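
/*
 * Both helpers above run with pagefaults disabled, so a missing page
 * yields an immediate -EFAULT instead of a sleeping fault.  That makes
 * them safe under the hash-bucket spinlock; callers that see -EFAULT
 * drop the lock, fault the page in writable, and retry.
 */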


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}
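
/*
 * find_task_by_vpid() resolves the TID in the caller's pid namespace;
 * the reference taken under rcu_read_lock() keeps the task_struct
 * alive once the RCU read section ends.
 */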

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, force_take = 0;
	u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = vpid;
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * Should we force take the futex? See below.
	 */
	if (unlikely(force_take)) {
		/*
		 * Keep the OWNER_DIED and the WAITERS bit and set the
		 * new TID value.
		 */
		newval = (curval & ~FUTEX_TID_MASK) | vpid;
		force_take = 0;
		lock_taken = 1;
	}

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to forced take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * We failed to find an owner for this
			 * futex. So we have no pi_state to block
			 * on. This can happen in two cases:
			 *
			 * 1) The owner died
			 * 2) A stale FUTEX_WAITERS bit
			 *
			 * Re-read the futex value.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * If the owner died or we have a stale
			 * WAITERS bit the owner TID in the user space
			 * futex is 0.
			 */
			if (!(curval & FUTEX_TID_MASK)) {
				force_take = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 uninitialized_var(oldval);

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit does not need to be preserved here. We are the owner:
	 */
	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
		return -EFAULT;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}
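
/*
 * This is the uncontended unlock path: the owner simply replaces its
 * TID with 0.  -EAGAIN signals that the user space value changed
 * underneath us (e.g. a waiter just set FUTEX_WAITERS), so the unlock
 * has to be retried via the slow path.
 */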

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
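
/*
 * Taking the two bucket locks in ascending address order gives every
 * path the same global ordering, so two tasks locking the same pair
 * of buckets from opposite directions cannot ABBA-deadlock.
 */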

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(&key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 *  1 - acquired the lock;
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
1237
	 */
1238 1239
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
1240
	if (ret == 1)
1241
		requeue_pi_wake_futex(top_waiter, key2, hb2);
1242 1243 1244 1245 1246 1247

	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1248
 * @uaddr1:	source futex user address
1249
 * @flags:	futex flags (FLAGS_SHARED, etc.)
1250 1251 1252 1253 1254
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1255
 *		pi futex (pi to pi requeue is not supported)
1256 1257 1258 1259
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
1260 1261
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
1262
 *  <0 - on error
L
 */
1264 1265 1266
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
L
{
1268
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1269 1270
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
1271
	struct futex_hash_bucket *hb1, *hb2;
P
	struct plist_head *head1;
L
	struct futex_q *this, *next;
1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}
L

1297
retry:
1298 1299 1300 1301 1302 1303 1304 1305 1306
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

1307
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
L
	if (unlikely(ret != 0))
		goto out;
1310 1311
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
L
	if (unlikely(ret != 0))
1313
		goto out_put_key1;
L

1315 1316
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);
L

D
retry_private:
I
	double_lock_hb(hb1, hb2);
L

1321 1322
	if (likely(cmpval != NULL)) {
		u32 curval;
L

1324
		ret = get_futex_value_locked(&curval, uaddr1);
L

		if (unlikely(ret)) {
D
			double_unlock_hb(hb1, hb2);
L

1329
			ret = get_user(curval, uaddr1);
D
			if (ret)
				goto out_put_keys;
L

1333
			if (!(flags & FLAGS_SHARED))
D
				goto retry_private;
L

1336 1337
			put_futex_key(&key2);
			put_futex_key(&key1);
D
			goto retry;
L
		}
1340
		if (curval != *cmpval) {
L
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it.
		 */
		if (ret == 1) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			ret = get_futex_value_locked(&curval2, uaddr2);
			if (!ret)
				ret = lookup_pi_state(curval2, hb2, &key2,
						      &pi_state);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter.  If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			wake_futex(this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer.  We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *   1 - if the futex_q was still queued (and we unqueued it);
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

L
		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  1 - success, lock taken;
 *  0 - success, lock not taken;
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
1751
		 * rt_mutex. Too late.
1752
		 */
1753
		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1754
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1755 1756 1757
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1758
		ret = fixup_pi_state_owner(uaddr, q, owner);
1759 1760 1761 1762 1763
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
1764
	 * the owner of the rt_mutex.
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  0 - uaddr contains val and hb has been locked;
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired value
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
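/*
 * Illustrative sketch (not part of the kernel build): the userspace fast
 * path that this slowpath backs up is an atomic compare-and-swap of
 * 0 -> TID, falling into FUTEX_LOCK_PI only on contention. Assuming a
 * tid from gettid() and the compiler atomic builtins, roughly:
 *
 *	uint32_t zero = 0;
 *	if (!__atomic_compare_exchange_n(&futex_val, &zero, tid, false,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex_val, FUTEX_LOCK_PI, 0, NULL,
 *			NULL, 0);
 *
 * The kernel sets FUTEX_WAITERS in the value while a waiter blocks, so
 * the owner's unlock path knows it must take the kernel slowpath too.
 */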
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(&q, hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
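/*
 * Illustrative userspace counterpart (a sketch, not kernel code): the
 * owner releases with an atomic TID -> 0 transition and enters the
 * kernel only when the word no longer equals its bare TID, i.e. when
 * FUTEX_WAITERS or FUTEX_OWNER_DIED got set while the lock was held:
 *
 *	uint32_t expected = tid;
 *	if (!__atomic_compare_exchange_n(&futex_val, &expected, 0, false,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex_val, FUTEX_UNLOCK_PI, 0, NULL,
 *			NULL, 0);
 */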
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	u32 uval, vpid = task_pid_vnr(current);
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == vpid))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Return:
 *  0 = no early wakeup detected;
 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 * 		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  0 - On success;
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
	rt_waiter.task = NULL;

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
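
/*
 * Usage sketch (illustrative only): a condition variable built over a PI
 * mutex pairs this operation with FUTEX_CMP_REQUEUE_PI. The waiter drops
 * the mutex, waits on the condvar word and names the mutex as the requeue
 * target; the signaler requeues waiters from the condvar onto the mutex.
 * Roughly, with cond_val and mutex_val as the two futex words:
 *
 *	waiter:
 *		syscall(SYS_futex, &cond_val, FUTEX_WAIT_REQUEUE_PI,
 *			seen_cond_val, NULL, &mutex_val, 0);
 *	signaler:
 *		syscall(SYS_futex, &cond_val, FUTEX_CMP_REQUEUE_PI, 1,
 *			(void *)(long)INT_MAX, &mutex_val, cond_val);
 *
 * nr_wake must be 1 and nr_requeue travels in the timeout slot; val3
 * carries the expected value of the condvar word.
 */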

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
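
/*
 * Illustrative userspace view (a sketch; the authoritative layout is in
 * the uapi header <linux/futex.h>): each thread registers one list head,
 * and every held robust lock is linked in via a robust_list node placed
 * at a fixed offset from the lock word. 'struct my_lock' below is a
 * made-up example type:
 *
 *	struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty circular list
 *		.futex_offset	 = offsetof(struct my_lock, futex),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * glibc performs the equivalent registration per thread to support
 * PTHREAD_MUTEX_ROBUST mutexes.
 */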

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
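
/*
 * Userspace counterpart (illustrative sketch): a robust-lock
 * implementation that sees FUTEX_OWNER_DIED in the lock word after
 * acquiring it knows the previous owner died holding the lock; glibc,
 * for example, surfaces this as EOWNERDEAD and expects the new owner
 * to call pthread_mutex_consistent(). Roughly, with lock_word as the
 * futex word:
 *
 *	uint32_t val = __atomic_load_n(&lock_word, __ATOMIC_RELAXED);
 *	if (val & FUTEX_OWNER_DIED) {
 *		// the protected state may be inconsistent; repair it,
 *		// then clear the bit as part of normal lock handling
 *	}
 */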

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, val, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}
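
/*
 * Illustrative raw usage of the multiplexer above (a sketch; glibc ships
 * no futex() wrapper, so callers go through syscall(2)):
 *
 *	uint32_t futex_val = 0;
 *
 *	// waiter: block while futex_val is still 0; an EAGAIN error
 *	// means the value changed before we slept, so just re-check.
 *	syscall(SYS_futex, &futex_val, FUTEX_WAIT_PRIVATE, 0, NULL,
 *		NULL, 0);
 *
 *	// waker: publish the new value, then wake one waiter.
 *	__atomic_store_n(&futex_val, 1, __ATOMIC_RELEASE);
 *	syscall(SYS_futex, &futex_val, FUTEX_WAKE_PRIVATE, 1, NULL,
 *		NULL, 0);
 */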


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on a functional
	 * implementation; the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);