Commit 296e236e authored by Davide Libenzi, committed by Linus Torvalds

epoll: fix epoll's own poll (update)

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 5071f97e
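
For context (this note and the sketch below are not part of the original commit): the code touched here implements eventpoll's own file_operations->poll() method, i.e. what runs when an epoll file descriptor is itself watched by another epoll instance or by poll()/select(). A minimal userspace illustration of that nesting, with error handling omitted:

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        int inner = epoll_create1(0);   /* epoll fd that watches stdin */
        int outer = epoll_create1(0);   /* epoll fd that watches "inner" */
        struct epoll_event ev = { .events = EPOLLIN }, out;

        ev.data.fd = STDIN_FILENO;
        epoll_ctl(inner, EPOLL_CTL_ADD, STDIN_FILENO, &ev);

        /* Readiness of "inner" as seen by "outer" is computed by eventpoll's
         * own ->poll(), the ep_eventpoll_poll()/ep_read_events_proc() path
         * touched by this patch. */
        ev.data.fd = inner;
        epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

        /* Blocks until stdin has data; then the inner epoll fd is reported
         * as readable by the outer one. */
        if (epoll_wait(outer, &out, 1, -1) == 1)
                printf("epoll fd %d is ready\n", out.data.fd);

        close(inner);
        close(outer);
        return 0;
}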
fs/eventpoll.c
@@ -454,9 +454,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
         int error, pwake = 0;
         unsigned long flags;
         struct epitem *epi, *nepi;
-        struct list_head txlist;
-
-        INIT_LIST_HEAD(&txlist);
+        LIST_HEAD(txlist);
 
         /*
          * We need to lock this because we could be hit by
@@ -473,8 +471,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
          * in a lockless way.
          */
         spin_lock_irqsave(&ep->lock, flags);
-        list_splice(&ep->rdllist, &txlist);
-        INIT_LIST_HEAD(&ep->rdllist);
+        list_splice_init(&ep->rdllist, &txlist);
         ep->ovflist = NULL;
         spin_unlock_irqrestore(&ep->lock, flags);
 
@@ -514,8 +511,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 
         if (!list_empty(&ep->rdllist)) {
                 /*
-                 * Wake up (if active) both the eventpoll wait list and the ->poll()
-                 * wait list (delayed after we release the lock).
+                 * Wake up (if active) both the eventpoll wait list and
+                 * the ->poll() wait list (delayed after we release the lock).
                  */
                 if (waitqueue_active(&ep->wq))
                         wake_up_locked(&ep->wq);
@@ -632,7 +629,8 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
         return 0;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv)
+static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+                               void *priv)
 {
         struct epitem *epi, *tmp;
 
@@ -640,13 +638,14 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv)
                 if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
                     epi->event.events)
                         return POLLIN | POLLRDNORM;
-                else
+                else {
                         /*
                          * Item has been dropped into the ready list by the poll
                          * callback, but it's not actually ready, as far as
                          * caller requested events goes. We can remove it here.
                          */
                         list_del_init(&epi->rdllink);
+                }
         }
 
         return 0;
@@ -674,7 +673,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
         pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
                                    ep_poll_readyevents_proc, ep, ep);
 
-        return pollflags != -1 ? pollflags: 0;
+        return pollflags != -1 ? pollflags : 0;
 }
 
 /* File callbacks that implement the eventpoll file behaviour */
@@ -872,9 +871,10 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                 add_wait_queue(whead, &pwq->wait);
                 list_add_tail(&pwq->llink, &epi->pwqlist);
                 epi->nwait++;
-        } else
+        } else {
                 /* We have to signal that an error occurred */
                 epi->nwait = -1;
+        }
 }
 
 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
@@ -1055,62 +1055,65 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
         return 0;
 }
 
-static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, void *priv)
+static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
+                               void *priv)
 {
         struct ep_send_events_data *esed = priv;
         int eventcnt;
         unsigned int revents;
         struct epitem *epi;
         struct epoll_event __user *uevent;
 
         /*
          * We can loop without lock because we are passed a task private list.
          * Items cannot vanish during the loop because ep_scan_ready_list() is
          * holding "mtx" during this call.
          */
         for (eventcnt = 0, uevent = esed->events;
              !list_empty(head) && eventcnt < esed->maxevents;) {
                 epi = list_first_entry(head, struct epitem, rdllink);
 
                 list_del_init(&epi->rdllink);
 
                 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
                         epi->event.events;
 
                 /*
                  * If the event mask intersect the caller-requested one,
                  * deliver the event to userspace. Again, ep_scan_ready_list()
                  * is holding "mtx", so no operations coming from userspace
                  * can change the item.
                  */
                 if (revents) {
                         if (__put_user(revents, &uevent->events) ||
                             __put_user(epi->event.data, &uevent->data))
-                                return eventcnt ? eventcnt: -EFAULT;
+                                return eventcnt ? eventcnt : -EFAULT;
                         eventcnt++;
                         uevent++;
                         if (epi->event.events & EPOLLONESHOT)
                                 epi->event.events &= EP_PRIVATE_BITS;
-                        else if (!(epi->event.events & EPOLLET))
+                        else if (!(epi->event.events & EPOLLET)) {
                                 /*
-                                 * If this file has been added with Level Trigger
-                                 * mode, we need to insert back inside the ready
-                                 * list, so that the next call to epoll_wait()
-                                 * will check again the events availability.
-                                 * At this point, noone can insert into ep->rdllist
-                                 * besides us. The epoll_ctl() callers are locked
-                                 * out by ep_scan_ready_list() holding "mtx" and
-                                 * the poll callback will queue them in ep->ovflist.
+                                 * If this file has been added with Level
+                                 * Trigger mode, we need to insert back inside
+                                 * the ready list, so that the next call to
+                                 * epoll_wait() will check again the events
+                                 * availability. At this point, noone can insert
+                                 * into ep->rdllist besides us. The epoll_ctl()
+                                 * callers are locked out by
+                                 * ep_scan_ready_list() holding "mtx" and the
+                                 * poll callback will queue them in ep->ovflist.
                                  */
                                 list_add_tail(&epi->rdllink, &ep->rdllist);
+                        }
                 }
         }
 
         return eventcnt;
 }
 
-static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
-                          int maxevents)
+static int ep_send_events(struct eventpoll *ep,
+                          struct epoll_event __user *events, int maxevents)
 {
         struct ep_send_events_data esed;
 
@@ -1194,40 +1197,41 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
  */
 SYSCALL_DEFINE1(epoll_create1, int, flags)
 {
-        int error;
-        struct eventpoll *ep = NULL;
+        int error, fd = -1;
+        struct eventpoll *ep;
 
         /* Check the EPOLL_* constant for consistency. */
         BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
 
+        if (flags & ~EPOLL_CLOEXEC)
+                return -EINVAL;
+
         DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
                      current, flags));
 
-        error = -EINVAL;
-        if (flags & ~EPOLL_CLOEXEC)
-                goto error_return;
-
         /*
-         * Create the internal data structure ("struct eventpoll").
+         * Create the internal data structure ( "struct eventpoll" ).
          */
         error = ep_alloc(&ep);
-        if (error < 0)
+        if (error < 0) {
+                fd = error;
                 goto error_return;
+        }
 
         /*
          * Creates all the items needed to setup an eventpoll file. That is,
         * a file structure and a free file descriptor.
          */
-        error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
-                                 flags & O_CLOEXEC);
-        if (error < 0)
+        fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
+                              flags & O_CLOEXEC);
+        if (fd < 0)
                 ep_free(ep);
 
 error_return:
         DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
-                     current, flags, error));
+                     current, flags, fd));
 
-        return error;
+        return fd;
 }
 
 SYSCALL_DEFINE1(epoll_create, int, size)
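
A side note on the level-trigger branch in ep_send_events_proc() above (the list_add_tail() back onto ep->rdllist): that re-insertion is what makes level-triggered epoll keep reporting an fd until it is drained. A small, hypothetical userspace sketch (not from the commit, error handling omitted) of the observable effect; with EPOLLET added to ev.events only the first wait would return 1:

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        int pfd[2];
        int epfd = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN }, out;

        pipe(pfd);
        write(pfd[1], "x", 1);          /* make the read end readable */

        ev.data.fd = pfd[0];
        epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev);

        /* Level-triggered (default): the item is put back on the ready list
         * after being delivered, so both calls report the pipe. */
        printf("first wait:  %d\n", epoll_wait(epfd, &out, 1, 0));
        printf("second wait: %d\n", epoll_wait(epfd, &out, 1, 0));

        return 0;
}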