提交 b2ac2ea6 编写于 作者: Davidlohr Bueso 提交者: Linus Torvalds

fs/epoll: use faster rb_first_cached()

...  such that we can avoid the tree walks to get the node with the
smallest key.  Semantically the same, as the previously used rb_first(),
but O(1).  The main overhead is the extra footprint for the cached rb_node
pointer, which should not matter for epoll.

Link: http://lkml.kernel.org/r/20170719014603.19029-15-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 410bd5ec
...@@ -205,7 +205,7 @@ struct eventpoll { ...@@ -205,7 +205,7 @@ struct eventpoll {
struct list_head rdllist; struct list_head rdllist;
/* RB tree root used to store monitored fd structs */ /* RB tree root used to store monitored fd structs */
struct rb_root rbr; struct rb_root_cached rbr;
/* /*
* This is a single linked list that chains all the "struct epitem" that * This is a single linked list that chains all the "struct epitem" that
...@@ -796,7 +796,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) ...@@ -796,7 +796,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
list_del_rcu(&epi->fllink); list_del_rcu(&epi->fllink);
spin_unlock(&file->f_lock); spin_unlock(&file->f_lock);
rb_erase(&epi->rbn, &ep->rbr); rb_erase_cached(&epi->rbn, &ep->rbr);
spin_lock_irqsave(&ep->lock, flags); spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink)) if (ep_is_linked(&epi->rdllink))
...@@ -840,7 +840,7 @@ static void ep_free(struct eventpoll *ep) ...@@ -840,7 +840,7 @@ static void ep_free(struct eventpoll *ep)
/* /*
* Walks through the whole tree by unregistering poll callbacks. * Walks through the whole tree by unregistering poll callbacks.
*/ */
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn); epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi); ep_unregister_pollwait(ep, epi);
...@@ -856,7 +856,7 @@ static void ep_free(struct eventpoll *ep) ...@@ -856,7 +856,7 @@ static void ep_free(struct eventpoll *ep)
* a lockdep warning. * a lockdep warning.
*/ */
mutex_lock(&ep->mtx); mutex_lock(&ep->mtx);
while ((rbp = rb_first(&ep->rbr)) != NULL) { while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn); epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi); ep_remove(ep, epi);
cond_resched(); cond_resched();
...@@ -963,7 +963,7 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f) ...@@ -963,7 +963,7 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f)
struct rb_node *rbp; struct rb_node *rbp;
mutex_lock(&ep->mtx); mutex_lock(&ep->mtx);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
struct epitem *epi = rb_entry(rbp, struct epitem, rbn); struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
struct inode *inode = file_inode(epi->ffd.file); struct inode *inode = file_inode(epi->ffd.file);
...@@ -1040,7 +1040,7 @@ static int ep_alloc(struct eventpoll **pep) ...@@ -1040,7 +1040,7 @@ static int ep_alloc(struct eventpoll **pep)
init_waitqueue_head(&ep->wq); init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait); init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist); INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT; ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR; ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user; ep->user = user;
...@@ -1066,7 +1066,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) ...@@ -1066,7 +1066,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
struct epoll_filefd ffd; struct epoll_filefd ffd;
ep_set_ffd(&ffd, file, fd); ep_set_ffd(&ffd, file, fd);
for (rbp = ep->rbr.rb_node; rbp; ) { for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn); epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd); kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0) if (kcmp > 0)
...@@ -1088,7 +1088,7 @@ static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long t ...@@ -1088,7 +1088,7 @@ static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long t
struct rb_node *rbp; struct rb_node *rbp;
struct epitem *epi; struct epitem *epi;
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn); epi = rb_entry(rbp, struct epitem, rbn);
if (epi->ffd.fd == tfd) { if (epi->ffd.fd == tfd) {
if (toff == 0) if (toff == 0)
...@@ -1273,20 +1273,22 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, ...@@ -1273,20 +1273,22 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{ {
int kcmp; int kcmp;
struct rb_node **p = &ep->rbr.rb_node, *parent = NULL; struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
struct epitem *epic; struct epitem *epic;
bool leftmost = true;
while (*p) { while (*p) {
parent = *p; parent = *p;
epic = rb_entry(parent, struct epitem, rbn); epic = rb_entry(parent, struct epitem, rbn);
kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd); kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
if (kcmp > 0) if (kcmp > 0) {
p = &parent->rb_right; p = &parent->rb_right;
else leftmost = false;
} else
p = &parent->rb_left; p = &parent->rb_left;
} }
rb_link_node(&epi->rbn, parent, p); rb_link_node(&epi->rbn, parent, p);
rb_insert_color(&epi->rbn, &ep->rbr); rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
} }
...@@ -1530,7 +1532,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, ...@@ -1530,7 +1532,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
list_del_rcu(&epi->fllink); list_del_rcu(&epi->fllink);
spin_unlock(&tfile->f_lock); spin_unlock(&tfile->f_lock);
rb_erase(&epi->rbn, &ep->rbr); rb_erase_cached(&epi->rbn, &ep->rbr);
error_unregister: error_unregister:
ep_unregister_pollwait(ep, epi); ep_unregister_pollwait(ep, epi);
...@@ -1878,7 +1880,7 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) ...@@ -1878,7 +1880,7 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
mutex_lock_nested(&ep->mtx, call_nests + 1); mutex_lock_nested(&ep->mtx, call_nests + 1);
ep->visited = 1; ep->visited = 1;
list_add(&ep->visited_list_link, &visited_list); list_add(&ep->visited_list_link, &visited_list);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn); epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) { if (unlikely(is_file_epoll(epi->ffd.file))) {
ep_tovisit = epi->ffd.file->private_data; ep_tovisit = epi->ffd.file->private_data;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册