Commit d01f0594 authored by Al Viro

untangling ep_call_nested(): it's all serialized on epmutex.

IOW,
	* no locking is needed to protect the list
	* the list is actually a stack
	* no need to check ->ctx
	* it can bloody well be a static 5-element array - nobody is
going to be accessing it in parallel.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 8677600d
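
For readability, here is the helper as it stands after this change, assembled straight from the hunks below (EP_MAX_NESTS is 4 in fs/eventpoll.c, hence the 5-element array mentioned above):

static void *cookies[EP_MAX_NESTS + 1];
static int nesting;

static int ep_call_nested(int (*nproc)(void *, void *, int), void *priv,
			  void *cookie)
{
	int error, i;

	if (nesting > EP_MAX_NESTS) /* too deep nesting */
		return -1;

	for (i = 0; i < nesting; i++) {
		if (cookies[i] == cookie) /* loop detected */
			return -1;
	}
	cookies[nesting++] = cookie;

	/* Call the nested function */
	error = (*nproc)(priv, cookie, nesting - 1);
	nesting--;

	return error;
}

Since everything is serialized on epmutex, the cookie stack needs no lock, and the old per-task ->ctx check is pointless: only one task can ever be in here at a time.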
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -109,25 +109,6 @@ struct epoll_filefd {
 	int fd;
 } __packed;
 
-/*
- * Structure used to track possible nested calls, for too deep recursions
- * and loop cycles.
- */
-struct nested_call_node {
-	struct list_head llink;
-	void *cookie;
-	void *ctx;
-};
-
-/*
- * This structure is used as collector for nested calls, to check for
- * maximum recursion dept and loop cycles.
- */
-struct nested_calls {
-	struct list_head tasks_call_list;
-	spinlock_t lock;
-};
-
 /* Wait structure used by the poll hooks */
 struct eppoll_entry {
 	/* List header used to link this structure to the "struct epitem" */
@@ -273,7 +254,8 @@ static DEFINE_MUTEX(epmutex);
 static u64 loop_check_gen = 0;
 
 /* Used to check for epoll file descriptor inclusion loops */
-static struct nested_calls poll_loop_ncalls;
+static void *cookies[EP_MAX_NESTS + 1];
+static int nesting;
 
 /* Slab cache used to allocate "struct epitem" */
 static struct kmem_cache *epi_cache __read_mostly;
@@ -348,13 +330,6 @@ static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
 	return container_of(p, struct eppoll_entry, wait)->base;
 }
 
-/* Initialize the poll safe wake up structure */
-static void ep_nested_calls_init(struct nested_calls *ncalls)
-{
-	INIT_LIST_HEAD(&ncalls->tasks_call_list);
-	spin_lock_init(&ncalls->lock);
-}
-
 /**
  * ep_events_available - Checks if ready events might be available.
  *
@@ -465,47 +440,20 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 static int ep_call_nested(int (*nproc)(void *, void *, int), void *priv,
 			  void *cookie)
 {
-	int error, call_nests = 0;
-	unsigned long flags;
-	struct nested_calls *ncalls = &poll_loop_ncalls;
-	struct list_head *lsthead = &ncalls->tasks_call_list;
-	struct nested_call_node *tncur;
-	struct nested_call_node tnode;
+	int error, i;
 
-	spin_lock_irqsave(&ncalls->lock, flags);
+	if (nesting > EP_MAX_NESTS) /* too deep nesting */
+		return -1;
 
-	/*
-	 * Try to see if the current task is already inside this wakeup call.
-	 * We use a list here, since the population inside this set is always
-	 * very much limited.
-	 */
-	list_for_each_entry(tncur, lsthead, llink) {
-		if (tncur->ctx == current &&
-		    (tncur->cookie == cookie || ++call_nests > EP_MAX_NESTS)) {
-			/*
-			 * Ops ... loop detected or maximum nest level reached.
-			 * We abort this wake by breaking the cycle itself.
-			 */
-			error = -1;
-			goto out_unlock;
-		}
+	for (i = 0; i < nesting; i++) {
+		if (cookies[i] == cookie) /* loop detected */
+			return -1;
 	}
-
-	/* Add the current task and cookie to the list */
-	tnode.ctx = current;
-	tnode.cookie = cookie;
-	list_add(&tnode.llink, lsthead);
-
-	spin_unlock_irqrestore(&ncalls->lock, flags);
+	cookies[nesting++] = cookie;
 
 	/* Call the nested function */
-	error = (*nproc)(priv, cookie, call_nests);
+	error = (*nproc)(priv, cookie, nesting - 1);
+	nesting--;
 
-	/* Remove the current task from the list */
-	spin_lock_irqsave(&ncalls->lock, flags);
-	list_del(&tnode.llink);
-out_unlock:
-	spin_unlock_irqrestore(&ncalls->lock, flags);
-
 	return error;
 }
@@ -2379,12 +2327,6 @@ static int __init eventpoll_init(void)
 		EP_ITEM_COST;
 	BUG_ON(max_user_watches < 0);
 
-	/*
-	 * Initialize the structure used to perform epoll file descriptor
-	 * inclusion loops checks.
-	 */
-	ep_nested_calls_init(&poll_loop_ncalls);
-
 	/*
 	 * We can have many thousands of epitems, so prevent this from
 	 * using an extra cache line on 64-bit (and smaller) CPUs
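
The same bounded-nesting and loop-detection pattern in a minimal, self-contained user-space sketch; every name in it (MAX_NESTS, call_nested, visit, struct node) is hypothetical, and, like the kernel code after this commit, it assumes an outer lock serializes all calls:

#include <stdio.h>

#define MAX_NESTS 4			/* hypothetical stand-in for EP_MAX_NESTS */

static void *cookies[MAX_NESTS + 1];
static int nesting;

/*
 * Same shape as the rewritten ep_call_nested(): refuse to nest deeper
 * than MAX_NESTS, refuse to revisit a cookie already on the stack.
 * No locking, because the caller is assumed to serialize all calls.
 */
static int call_nested(int (*proc)(void *, void *, int), void *priv,
		       void *cookie)
{
	int error, i;

	if (nesting > MAX_NESTS)	/* too deep nesting */
		return -1;
	for (i = 0; i < nesting; i++) {
		if (cookies[i] == cookie)	/* loop detected */
			return -1;
	}
	cookies[nesting++] = cookie;
	error = proc(priv, cookie, nesting - 1);
	nesting--;
	return error;
}

/* Hypothetical graph node: visiting a node visits its successor. */
struct node {
	const char *name;
	struct node *next;
};

static int visit(void *priv, void *cookie, int depth)
{
	struct node *n = cookie;

	printf("%*svisit %s (depth %d)\n", depth * 2, "", n->name, depth);
	return n->next ? call_nested(visit, priv, n->next) : 0;
}

int main(void)
{
	struct node b = { "b", NULL };
	struct node a = { "a", &b };

	b.next = &a;	/* a -> b -> a: a cycle */

	/* The second visit of "a" finds its cookie on the stack. */
	return call_nested(visit, NULL, &a) == -1 ? 0 : 1;
}

Run, this prints the two visits and then aborts on the a -> b -> a cycle, so call_nested() returns -1 just as the epoll loop check does.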