/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* For fd, sigio sent using these: */
	int fd;
	struct file *filp;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * Take mm->mmap_sem, when futex is shared
 */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		down_read(fshared);
}

/*
 * Release mm->mmap_sem, when the futex is shared
 */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		up_read(fshared);
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: NULL for a PROCESS_PRIVATE futex,
 *	&current->mm->mmap_sem for a PROCESS_SHARED futex
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * fshared is NULL for PROCESS_PRIVATE futexes.
 * For other futexes, it points to &current->mm->mmap_sem and the
 * caller must have taken the reader lock, but NOT any spinlocks.
 */
static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
			 union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}
	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping.  So check vma first.
	 */
	vma = find_extend_vma(mm, address);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.  Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
	key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap.  But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}
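
/*
 * Illustrative summary (a sketch, not compiled code) of the three key
 * flavours get_futex_key() can produce; field names follow the
 * union futex_key usage above:
 *
 *	PROCESS_PRIVATE futex:	{ private.mm, private.address }
 *	private mapping:	{ private.mm, private.address },
 *				both.offset |= FUT_OFF_MMSHARED
 *	shared file mapping:	{ shared.inode, shared.pgoff },
 *				both.offset |= FUT_OFF_INODE
 *
 * Two processes mapping the same file page thus compute the same key
 * and hash to the same bucket, which is what makes cross-process
 * futexes work.
 */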

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (key->both.ptr == 0)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
		case FUT_OFF_INODE:
			atomic_inc(&key->shared.inode->i_count);
			break;
		case FUT_OFF_MMSHARED:
			atomic_inc(&key->private.mm->mm_count);
			break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
		case FUT_OFF_INODE:
			iput(key->shared.inode);
			break;
		case FUT_OFF_MMSHARED:
			mmdrop(key->private.mm);
			break;
	}
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
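
/*
 * Illustrative caller sketch (not actual code from this file; the real
 * callers are futex_wait(), futex_requeue() etc. below).  The *_locked
 * helpers above are meant to run under a hash bucket spinlock, where
 * sleeping on a page fault would deadlock, so pagefault_disable() turns
 * such a fault into an error return that the caller handles by dropping
 * its locks first:
 *
 *	spin_lock(&hb->lock);
 *	ret = get_futex_value_locked(&uval, uaddr);
 *	if (ret) {
 *		spin_unlock(&hb->lock);
 *		ret = get_user(uval, uaddr);	(fault the page in)
 *		if (!ret)
 *			goto retry;		(and try again)
 *	}
 */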

/*
 * Fault handling.
 * if fshared is non-NULL, current->mm->mmap_sem is already held
 */
static int futex_handle_fault(unsigned long address,
			      struct rw_semaphore *fshared, int attempt)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	int ret = -EFAULT;

	if (attempt > 2)
		return ret;

	if (!fshared)
		down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && address >= vma->vm_start &&
	    (vma->vm_flags & VM_WRITE)) {
		int fault;
		fault = handle_mm_fault(mm, vma, address, 1);
		if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
			/* XXX: let's do this when we verify it is OK */
			if (fault & VM_FAULT_OOM)
				ret = -ENOMEM;
#endif
		} else {
			ret = 0;
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
		}
	}
	if (!fshared)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
		p = ERR_PTR(-ESRCH);
	else
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	if (q->filp)
		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store
	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
	 * at the end of wake_up_all() does not prevent this store from
	 * moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. That way we know that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit does not have to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
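
/*
 * The matching unlock needs no particular order; a sketch of the
 * pattern the callers below use:
 *
 *	spin_unlock(&hb1->lock);
 *	if (hb1 != hb2)
 *		spin_unlock(&hb2->lock);
 *
 * Taking the two bucket locks in address order means two tasks
 * operating on the same pair of buckets can never deadlock against
 * each other.
 */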

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
		      int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key;
	int ret;

	if (!bitset)
		return -EINVAL;

	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
out:
	futex_unlock_mm(fshared);
	return ret;
}

/*
 * Wake up to nr_wake waiters on uaddr1, apply the operation encoded
 * in op to the futex value at uaddr2 and, if the op result asks for
 * it, wake up to nr_wake2 waiters on uaddr2 as well:
 */
static int
futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
	      u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry:
	double_lock_hb(hb1, hb2);

	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		spin_unlock(&hb1->lock);
		if (hb1 != hb2)
			spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/*
		 * futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically.  Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem.
		 */
		if (attempt++) {
			ret = futex_handle_fault((unsigned long)uaddr2,
						 fshared, attempt);
			if (ret)
				goto out;
			goto retry;
		}

		/*
		 * If we would have faulted, release mmap_sem,
		 * fault it in and start all over again.
		 */
		futex_unlock_mm(fshared);

		ret = get_user(dummy, uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
out:
	futex_unlock_mm(fshared);

	return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
			 u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

 retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			spin_unlock(&hb1->lock);
			if (hb1 != hb2)
				spin_unlock(&hb2->lock);

			/*
			 * If we would have faulted, release mmap_sem, fault
			 * it in and start all over again.
			 */
			futex_unlock_mm(fshared);

			ret = get_user(curval, uaddr1);

			if (!ret)
				goto retry;

			return ret;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex (&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			/*
			 * If key1 and key2 hash to the same bucket, no need to
			 * requeue.
			 */
			if (likely(head1 != &hb2->chain)) {
				plist_del(&this->list, &hb1->chain);
				plist_add(&this->list, &hb2->chain);
				this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
				this->list.plist.lock = &hb2->lock;
#endif
			}
			this->key = key2;
			get_futex_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);

	/* drop_futex_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out:
	futex_unlock_mm(fshared);
	return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *hb;

	q->fd = fd;
	q->filp = filp;

	init_waitqueue_head(&q->waiters);

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *hb;

	hb = queue_lock(q, fd, filp);
	__queue_me(q, hb);
}

/* Return 1 if we were still queued (i.e. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
 retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	} else
		newtid |= FUTEX_OWNER_DIED;

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);

	/*
	 * We own it, so we have to replace the pending owner
	 * TID. This must be atomic as we have to preserve the
	 * owner died bit here.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	while (!ret) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		if (curval == uval)
			break;
		uval = curval;
	}
	return ret;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the shared capability in 'flags':
 */
#define FLAGS_SHARED  1

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
		      u32 val, ktime_t *abs_time, u32 bitset)
{
	struct task_struct *curr = current;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
 retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	hb = queue_lock(&q, -1, NULL);

	/*
	 * Access the page AFTER the futex is queued.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		/*
		 * If we would have faulted, release mmap_sem, fault it in and
		 * start all over again.
		 */
		futex_unlock_mm(fshared);

		ret = get_user(uval, uaddr);

		if (!ret)
			goto retry;
		return ret;
	}
	ret = -EWOULDBLOCK;
	if (uval != val)
		goto out_unlock_release_sem;

	/* Only actually queue if *uaddr contained val.  */
	__queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	futex_unlock_mm(fshared);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiters, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			t.timer.expires = *abs_time;

			hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	if (!unqueue_me(&q))
		return 0;
	if (rem)
		return -ETIMEDOUT;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	if (!abs_time)
		return -ERESTARTSYS;
	else {
		struct restart_block *restart;
		restart = &current_thread_info()->restart_block;
		restart->fn = futex_wait_restart;
		restart->futex.uaddr = (u32 *)uaddr;
		restart->futex.val = val;
		restart->futex.time = abs_time->tv64;
		restart->futex.bitset = bitset;
		restart->futex.flags = 0;

		if (fshared)
			restart->futex.flags |= FLAGS_SHARED;
		return -ERESTART_RESTARTBLOCK;
	}

 out_unlock_release_sem:
	queue_unlock(&q, hb);

 out_release_sem:
	futex_unlock_mm(fshared);
	return ret;
}
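
/*
 * For reference, a hedged sketch of the userspace side of the protocol
 * implemented above (headers, helpers and the EXPECTED/NEW_VALUE names
 * are illustrative, not part of this file):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int val = *futex_word;			(waiter)
 *	if (val == EXPECTED)
 *		syscall(SYS_futex, futex_word, FUTEX_WAIT, val, NULL);
 *
 *	*futex_word = NEW_VALUE;		(waker)
 *	syscall(SYS_futex, futex_word, FUTEX_WAKE, 1);
 *
 * The -EWOULDBLOCK return above is what the waiter sees when the value
 * changed between its userspace test and the kernel's locked re-check.
 */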

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	struct rw_semaphore *fshared = NULL;
	ktime_t t;

	t.tv64 = restart->futex.time;
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = &current->mm->mmap_sem;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
				restart->futex.bitset);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct task_struct *curr = current;
	struct futex_hash_bucket *hb;
	u32 uval, newval, curval;
	struct futex_q q;
	int ret, lock_taken, ownerdied = 0, attempt = 0;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		to->timer.expires = *time;
	}

	q.pi_state = NULL;
 retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

 retry_unlocked:
	hb = queue_lock(&q, -1, NULL);

 retry_locked:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(current);

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;

	/*
	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
	 * situation and we return success to user space.
	 */
	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
		ret = -EDEADLK;
		goto out_unlock_release_sem;
	}

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		goto out_unlock_release_sem;

	uval = curval;

	/*
	 * Set the WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases, where a futex might have no owner (the
	 * owner TID is 0): OWNER_DIED. We take over the futex in this
	 * case. We also do an unconditional take over, when the owner
	 * of the futex died.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;
	if (unlikely(curval != uval))
		goto retry_locked;

	/*
	 * We took the lock due to owner died take over.
	 */
	if (unlikely(lock_taken))
		goto out_unlock_release_sem;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

	if (unlikely(ret)) {
		switch (ret) {

		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			futex_unlock_mm(fshared);
			cond_resched();
			goto retry;

		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				goto uaddr_faulted;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry_locked;
			}
		default:
			goto out_unlock_release_sem;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	futex_unlock_mm(fshared);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	futex_lock_mm(fshared);
	spin_lock(q.lock_ptr);

	if (!ret) {
		/*
		 * Got the lock. We might not be the anticipated owner
		 * if we did a lock-steal - fix up the PI-state in
		 * that case:
		 */
		if (q.pi_state->owner != curr)
			ret = fixup_pi_state_owner(uaddr, &q, curr);
	} else {
		/*
		 * Catch the rare case, where the lock was released
		 * when we were on the way back before we locked the
		 * hash bucket.
		 */
		if (q.pi_state->owner == curr) {
			/*
			 * Try to get the rt_mutex now. This might
			 * fail as some other task acquired the
			 * rt_mutex after we removed ourself from the
			 * rt_mutex waiters list.
			 */
			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
				ret = 0;
			else {
				/*
				 * pi_state is incorrect, some other
				 * task did a lock steal and we
				 * returned due to timeout or signal
				 * without taking the rt_mutex. Too
				 * late. We can access the
				 * rt_mutex_owner without locking, as
				 * the other task is now blocked on
				 * the hash bucket lock. Fix the state
				 * up.
				 */
				struct task_struct *owner;
				int res;

				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
				res = fixup_pi_state_owner(uaddr, &q, owner);

				/* propagate -EFAULT, if the fixup failed */
				if (res)
					ret = res;
			}
		} else {
			/*
			 * Paranoia check. If we did not take the lock
			 * in the trylock above, then we should not be
			 * the owner of the rtmutex, neither the real
			 * nor the pending one:
			 */
			if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
				printk(KERN_ERR "futex_lock_pi: ret = %d "
				       "pi-mutex: %p pi-state %p\n", ret,
				       q.pi_state->pi_mutex.owner,
				       q.pi_state->owner);
		}
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);
	futex_unlock_mm(fshared);

	return ret != -EINTR ? ret : -ERESTARTNOINTR;

 out_unlock_release_sem:
	queue_unlock(&q, hb);

 out_release_sem:
	futex_unlock_mm(fshared);
	return ret;

 uaddr_faulted:
	/*
	 * We have to r/w  *(int __user *)uaddr, but we can't modify it
	 * non-atomically.  Therefore, if get_user below is not
	 * enough, we need to handle the fault ourselves, while
	 * still holding the mmap_sem.
	 *
	 * ... and hb->lock. :-) --ANK
	 */
	queue_unlock(&q, hb);

	if (attempt++) {
		ret = futex_handle_fault((unsigned long)uaddr, fshared,
					 attempt);
		if (ret)
			goto out_release_sem;
		goto retry_unlocked;
	}

	futex_unlock_mm(fshared);

	ret = get_user(uval, uaddr);
	if (!ret && (uval != -EFAULT))
		goto retry;

	return ret;
}
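
/*
 * For reference, a hedged sketch of the userspace fast path that this
 * slow path backs up (illustrative pseudo-C, not part of this file):
 *
 *	lock:
 *		if (cmpxchg(futex_word, 0, TID) != 0)
 *			syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, ...);
 *	unlock:
 *		if (cmpxchg(futex_word, TID, 0) != TID)
 *			syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, ...);
 *
 * Only contended transitions enter the kernel, which is why the code
 * above may legitimately observe a 0 futex value: the lock can be
 * released between the userspace failure and the kernel's cmpxchg.
 */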

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key;
	int ret, attempt = 0;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;
	/*
	 * First take all the futex related locks:
	 */
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
retry_unlocked:
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex (&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
out:
	futex_unlock_mm(fshared);

	return ret;

pi_faulted:
	/*
	 * We have to r/w  *(int __user *)uaddr, but we can't modify it
	 * non-atomically.  Therefore, if get_user below is not
	 * enough, we need to handle the fault ourselves, while
	 * still holding the mmap_sem.
	 *
	 * ... and hb->lock. --ANK
	 */
	spin_unlock(&hb->lock);

	if (attempt++) {
		ret = futex_handle_fault((unsigned long)uaddr, fshared,
					 attempt);
		if (ret)
			goto out;
		uval = 0;
		goto retry_unlocked;
	}

	futex_unlock_mm(fshared);

	ret = get_user(uval, uaddr);
	if (!ret && (uval != -EFAULT))
		goto retry;

	return ret;
}

static int futex_close(struct inode *inode, struct file *filp)
{
	struct futex_q *q = filp->private_data;

	unqueue_me(q);
	kfree(q);
1740

L
Linus Torvalds 已提交
1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753
	return 0;
}

/* This is one-shot: once it's gone off you need a new fd */
static unsigned int futex_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct futex_q *q = filp->private_data;
	int ret = 0;

	poll_wait(filp, &q->waiters, wait);

	/*
P
Pierre Peiffer 已提交
1754
	 * plist_node_empty() is safe here without any lock.
L
Linus Torvalds 已提交
1755 1756
	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
P
Pierre Peiffer 已提交
1757
	if (plist_node_empty(&q->list))
L
Linus Torvalds 已提交
1758 1759 1760 1761 1762
		ret = POLLIN | POLLRDNORM;

	return ret;
}

1763
static const struct file_operations futex_fops = {
L
Linus Torvalds 已提交
1764 1765 1766 1767 1768 1769 1770 1771
	.release	= futex_close,
	.poll		= futex_poll,
};

/*
 * Signal allows caller to avoid the race which would occur if they
 * set the sigio stuff up afterwards.
 */
1772
static int futex_fd(u32 __user *uaddr, int signal)
L
Linus Torvalds 已提交
1773 1774 1775 1776
{
	struct futex_q *q;
	struct file *filp;
	int ret, err;
E
Eric Dumazet 已提交
1777
	struct rw_semaphore *fshared;
1778 1779 1780 1781
	static unsigned long printk_interval;

	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
T
Thomas Gleixner 已提交
1782 1783
		       "will be removed from the kernel in June 2007\n",
		       current->comm);
1784
	}
L
Linus Torvalds 已提交
1785 1786

	ret = -EINVAL;
1787
	if (!valid_signal(signal))
L
Linus Torvalds 已提交
1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799
		goto out;

	ret = get_unused_fd();
	if (ret < 0)
		goto out;
	filp = get_empty_filp();
	if (!filp) {
		put_unused_fd(ret);
		ret = -ENFILE;
		goto out;
	}
	filp->f_op = &futex_fops;
1800 1801 1802
	filp->f_path.mnt = mntget(futex_mnt);
	filp->f_path.dentry = dget(futex_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
L
Linus Torvalds 已提交
1803 1804

	if (signal) {
1805
		err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
L
Linus Torvalds 已提交
1806
		if (err < 0) {
1807
			goto error;
		}
		filp->f_owner.signum = signal;
	}

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		err = -ENOMEM;
		goto error;
	}
	q->pi_state = NULL;

	fshared = &current->mm->mmap_sem;
	down_read(fshared);
	err = get_futex_key(uaddr, fshared, &q->key);

	if (unlikely(err != 0)) {
		up_read(fshared);
		kfree(q);
		goto error;
	}

	/*
	 * queue_me() must be called before releasing mmap_sem, because
	 * key->shared.inode needs to be referenced while holding it.
	 */
	filp->private_data = q;

	queue_me(q, ret, filp);
	up_read(fshared);

	/* Now we map fd to filp, so userspace can access it */
	fd_install(ret, filp);
out:
	return ret;
error:
	put_unused_fd(ret);
	put_filp(filp);
	ret = err;
	goto out;
}

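/*
 * Illustrative userspace use of the deprecated FUTEX_FD operation
 * implemented above (a minimal sketch following the futex(2) man page;
 * not code from this file, and the printk above already schedules the
 * operation for removal):
 *
 *	int fd = syscall(SYS_futex, &futex_word, FUTEX_FD, 0,
 *			 NULL, NULL, 0);	// val == 0: no signal
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// POLLIN once the futex is woken
 *	close(fd);		// one-shot: get a new fd for the next wait
 */
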
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

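/*
 * A minimal userspace sketch of the protocol described above (illustrative
 * only; struct my_lock and its layout are made up for the example, and
 * glibc does the equivalent work for robust pthread mutexes):
 *
 *	struct my_lock {
 *		struct robust_list list;	// linkage the kernel walks
 *		int futex;			// the futex word itself
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },  // empty (circular)
 *		.futex_offset	 = offsetof(struct my_lock, futex) -
 *				   offsetof(struct my_lock, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	// once per thread:
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Before acquiring a lock the thread stores &lock->list in
 * list_op_pending, links the node into the list once it owns the lock,
 * and clears list_op_pending again; that window is exactly what the
 * kernel re-checks in exit_robust_list() below.
 */
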
/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
		    size_t len)
{
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
		    size_t __user *len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		if ((current->euid != p->euid) && (current->euid != p->uid) &&
				!capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

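/*
 * Illustrative caller of the two syscalls above (a sketch of what a
 * debugger or introspection tool might do; not code from this file):
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *
 *	if (syscall(SYS_get_robust_list, pid, &head, &len) == 0)
 *		printf("robust list of %d: head %p, header %zu bytes\n",
 *		       pid, (void *)head, len);
 */
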
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, &curr->mm->mmap_sem, 1,
				   FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

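/*
 * From the waiter's side, the FUTEX_OWNER_DIED bit set above is what
 * eventually surfaces as EOWNERDEAD. A hedged sketch of the usual
 * pthread-level recovery (assuming a glibc that maps the bit to this
 * error code; repair_shared_state() is a made-up application hook):
 *
 *	int err = pthread_mutex_lock(&mutex);
 *	if (err == EOWNERDEAD) {
 *		repair_shared_state();			// app-specific fixup
 *		pthread_mutex_consistent_np(&mutex);	// usable again
 *	}
 */
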
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int ret;
	int cmd = op & FUTEX_CMD_MASK;
	struct rw_semaphore *fshared = NULL;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = &current->mm->mmap_sem;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_FD:
		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
		ret = futex_fd(uaddr, val);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}


asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
			  struct timespec __user *utime, u32 __user *uaddr2,
			  u32 val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

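/*
 * Illustrative userspace wrappers for the two most common operations
 * multiplexed through sys_futex() above (a minimal sketch using the raw
 * syscall, per the futex(2) man page; not code from this file):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// sleep while *uaddr == val (uaddr2/val3 are unused here)
 *	static int futex_wait_user(int *uaddr, int val,
 *				   const struct timespec *timeout)
 *	{
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val,
 *			       timeout, NULL, 0);
 *	}
 *
 *	// wake at most nr_wake waiters blocked on uaddr
 *	static int futex_wake_user(int *uaddr, int nr_wake)
 *	{
 *		return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake,
 *			       NULL, NULL, 0);
 *	}
 */
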
static int futexfs_get_sb(struct file_system_type *fs_type,
			  int flags, const char *dev_name, void *data,
			  struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
}

static struct file_system_type futex_fs_type = {
	.name		= "futexfs",
	.get_sb		= futexfs_get_sb,
	.kill_sb	= kill_anon_super,
};

static int __init init(void)
{
	int i = register_filesystem(&futex_fs_type);

	if (i)
		return i;

	futex_mnt = kern_mount(&futex_fs_type);
	if (IS_ERR(futex_mnt)) {
		unregister_filesystem(&futex_fs_type);
		return PTR_ERR(futex_mnt);
	}

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}
	return 0;
}
__initcall(init);