/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
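
/*
 * A minimal sketch of the acquisition order described above (illustration
 * only; no single path in this file takes all three locks back to back):
 *
 *	mutex_lock(&epmutex);			1) global mutex
 *	mutex_lock(&ep->mtx);			2) per-instance mutex
 *	spin_lock_irqsave(&ep->lock, flags);	3) per-instance spinlock
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 */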

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
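
/*
 * Worked example for EP_MAX_MSTIMEO (illustration only): with HZ == 1000,
 * the bound becomes min(MAX_SCHEDULE_TIMEOUT, (LONG_MAX - 999) / 1000), so
 * any accepted timeout satisfies timeout * HZ + 999 <= LONG_MAX and the
 * millisecond-to-jiffies conversion done in ep_poll(),
 * (timeout * HZ + 999) / 1000, cannot overflow a long.
 */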

struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Structure used to track possible nested calls, to check for too deep
 * recursions and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	int cpu;
};

/*
 * This structure is used as a collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * got events while we were transferring ready events to user space,
	 * without holding ->lock.
	 */
	struct epitem *ovflist;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static int max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
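
/*
 * With CONFIG_SYSCTL enabled, the knob above shows up as
 * /proc/sys/fs/epoll/max_user_watches. A hypothetical shell session
 * (illustration only; the numbers are made up):
 *
 *	$ cat /proc/sys/fs/epoll/max_user_watches
 *	397778
 *	# echo 500000 > /proc/sys/fs/epoll/max_user_watches
 */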

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1:
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
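
/*
 * E.g. (illustration only): because the key is the (file *, fd) pair,
 * registering the same open file under two descriptor numbers, say fd 4
 * and a dup(2)'d fd 7, compares equal on the file pointer but different
 * on fd, so both land in the tree as distinct epitems.
 */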

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the nested calls tracking structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}

/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 *
 * Returns: The code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie)
{
	int error, call_nests = 0;
	unsigned long flags;
	int this_cpu = get_cpu();
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->cpu == this_cpu &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.cpu = this_cpu;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	put_cpu();
	return error;
}

static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	wake_up_nested((wait_queue_head_t *) cookie, 1 + call_nests);
	return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors no more than EP_MAX_NESTS
 * deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq);
}
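
/*
 * Illustration only: the nesting guarded against here is created from user
 * space by watching one epoll fd with another (hypothetical snippet, error
 * handling omitted):
 *
 *	int inner = epoll_create1(0);
 *	int outer = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 *
 * A wakeup on "inner" must then be propagated to "outer", and chains deeper
 * than EP_MAX_NESTS are cut off by ep_call_nested().
 */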

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		remove_wait_queue(pwq->whead, &pwq->wait);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	mutex_lock(&ep->mtx);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping without locks are not lost. We cannot
	 * have the poll callback queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}
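
/*
 * Timeline sketch of the ovflist hand-off above (illustration only):
 *
 *	ep_scan_ready_list()                 ep_poll_callback() (wakeup path)
 *	---------------------                -------------------------------
 *	ovflist = NULL; steal rdllist
 *	sproc() runs, may sleep      <----   event fires: ovflist is active,
 *	  (no ep->lock held)                 so chain the epitem on ovflist
 *	splice ovflist back to rdllist
 *	ovflist = EP_UNACTIVE_PTR
 */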

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the lock acquisition sequence: here we take "ep->lock" and then the
	 * wait queue head lock when unregistering the wait queue. The wakeup
	 * callback runs holding the wait queue head lock and will call our
	 * callback that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_dec(&ep->user->epoll_watches);

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;

	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as the
			 * caller-requested events go. We can remove it here.
			 */
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}

static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL);
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This needs to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep);

	return pollflags != -1 ? pollflags : 0;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need to have this facility to correctly clean up files that
 * are closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock(&ep->mtx);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink))
		list_add_tail(&epi->rdllink, &ep->rdllist);

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
		     max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_inc(&ep->user->epoll_watches);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);

	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	spin_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}

static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;

	/*
	 * We can loop without lock because we are passed a task private list.
	 * Items cannot vanish during the loop because ep_scan_ready_list() is
	 * holding "mtx" during this call.
	 */
	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		list_del_init(&epi->rdllink);

		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
			epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, ep_scan_ready_list()
		 * is holding "mtx", so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data)) {
				list_add(&epi->rdllink, head);
				return eventcnt ? eventcnt : -EFAULT;
			}
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Trigger mode, we need to insert back inside
				 * the ready list, so that the next call to
				 * epoll_wait() will check the events
				 * availability again. At this point, no one can
				 * insert into ep->rdllist besides us. The
				 * epoll_ctl() callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
			}
		}
	}

	return eventcnt;
}

static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}

static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value (-1)
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that is why (t * HZ) / 1000.
	 */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;

retry:
	spin_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}
	/* Is it worth trying to dig for events? */
	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

/*
 * Open an eventpoll file descriptor.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error;
	struct eventpoll *ep = NULL;

	/* Check the EPOLL_* constant for consistency.  */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;
	/*
	 * Create the internal data structure ("struct eventpoll").
	 */
	error = ep_alloc(&ep);
	if (error < 0)
		return error;
	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure and a free file descriptor.
	 */
	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
				 flags & O_CLOEXEC);
	if (error < 0)
		ep_free(ep);

	return error;
}

SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size < 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	mutex_lock(&ep->mtx);

	/*
	 * Try to look up the file inside our RB tree. Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;

			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	fput(tfile);
error_fput:
	fput(file);
error_return:

	return error;
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:

	return error;
}
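
/*
 * A minimal user-space sketch (illustration only, not part of this file)
 * tying the syscalls above together; "sock" and "handle" are hypothetical
 * and error handling is omitted:
 *
 *	#include <sys/epoll.h>
 *
 *	int epfd = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sock };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
 *
 *	struct epoll_event events[64];
 *	for (;;) {
 *		int n = epoll_wait(epfd, events, 64, -1);
 *		for (int i = 0; i < n; i++)
 *			handle(events[i].data.fd);
 *	}
 *
 * A timeout of -1 requests the "infinite" wait handled in ep_poll() above.
 */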

#ifdef HAVE_SET_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* HAVE_SET_RESTORE_SIGMASK */

static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allows top 4% of lomem to be allocated for epoll watches (per user).
	 */
	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_nested_calls_init(&poll_safewake_ncalls);

	/* Initialize the structure used to perform file's f_op->poll() calls */
	ep_nested_calls_init(&poll_readywalk_ncalls);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);