Commit e944475e authored by Jens Axboe

io_uring: make poll->wait dynamically allocated

In the quest to bring io_kiocb down to 3 cachelines, this one does
the trick. Make the wait_queue_entry for the poll command come out
of kmalloc instead of embedding it in struct io_poll_iocb, as the
latter is the largest member of io_kiocb. Once we trim this down a
bit, we're back at a healthy 192 bytes for struct io_kiocb.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 6206f0e1
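
The entire saving comes from swapping an embedded member for a pointer: struct io_kiocb then carries only sizeof(void *) for the wait entry instead of the whole struct wait_queue_entry (40 bytes on a typical 64-bit build). A minimal userspace sketch of the pattern (the types below are illustrative stand-ins, not the real kernel definitions):

#include <stdio.h>

/* Stand-in with the same shape as struct wait_queue_entry:
 * flags, private data, callback, and a two-pointer list head. */
struct wait_entry {
        unsigned int flags;
        void *private;
        int (*func)(struct wait_entry *, unsigned int, int, void *);
        void *entry_prev, *entry_next;
};

/* Before the patch: the wait entry is embedded in the poll iocb. */
struct poll_embedded {
        void *file;
        void *head;
        unsigned int events;
        struct wait_entry wait;         /* all 40 bytes live here */
};

/* After the patch: only a pointer; the entry is allocated on demand. */
struct poll_dynamic {
        void *file;
        void *head;
        unsigned int events;
        struct wait_entry *wait;        /* 8 bytes */
};

int main(void)
{
        printf("embedded: %zu bytes\n", sizeof(struct poll_embedded));
        printf("dynamic:  %zu bytes\n", sizeof(struct poll_dynamic));
        return 0;
}

On an LP64 build this prints 64 vs 32 bytes; the same mechanics are what bring the real struct io_kiocb back down to 192 bytes, i.e. three 64-byte cachelines.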
@@ -291,7 +291,7 @@ struct io_poll_iocb {
         __poll_t events;
         bool done;
         bool canceled;
-        struct wait_queue_entry wait;
+        struct wait_queue_entry *wait;
 };
 
 struct io_timeout_data {
@@ -2030,8 +2030,8 @@ static void io_poll_remove_one(struct io_kiocb *req)
 
         spin_lock(&poll->head->lock);
         WRITE_ONCE(poll->canceled, true);
-        if (!list_empty(&poll->wait.entry)) {
-                list_del_init(&poll->wait.entry);
+        if (!list_empty(&poll->wait->entry)) {
+                list_del_init(&poll->wait->entry);
                 io_queue_async_work(req);
         }
         spin_unlock(&poll->head->lock);
@@ -2104,6 +2104,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
         struct io_ring_ctx *ctx = req->ctx;
 
         req->poll.done = true;
+        kfree(req->poll.wait);
         if (error)
                 io_cqring_fill_event(req, error);
         else
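
The kfree() above pairs with the kmalloc() added to io_poll_add() further down: the wait entry is allocated at submission and freed at completion, living exactly as long as one poll request. A hedged sketch of that ownership pattern, with userspace stand-ins (malloc/free) in place of the kernel API:

#include <errno.h>
#include <stdlib.h>

struct wait_entry { void *private; };

struct poll_req {
        struct wait_entry *wait;
        int done;
};

/* Submission path: allocate the entry up front so a memory failure
 * surfaces as -ENOMEM before the request is armed. */
static int poll_add(struct poll_req *req)
{
        req->wait = malloc(sizeof(*req->wait));
        if (!req->wait)
                return -ENOMEM;
        req->wait->private = req;       /* back-pointer used by the wake path */
        return 0;
}

/* Completion path: the request owns the entry and frees it exactly
 * once, after it is off every wait queue. */
static void poll_complete(struct poll_req *req)
{
        req->done = 1;
        free(req->wait);
        req->wait = NULL;               /* defensive in this sketch; the patch doesn't NULL it */
}

int main(void)
{
        struct poll_req req = { 0 };

        if (poll_add(&req))
                return 1;
        poll_complete(&req);
        return 0;
}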
@@ -2141,7 +2142,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
          */
         spin_lock_irq(&ctx->completion_lock);
         if (!mask && ret != -ECANCELED) {
-                add_wait_queue(poll->head, &poll->wait);
+                add_wait_queue(poll->head, poll->wait);
                 spin_unlock_irq(&ctx->completion_lock);
                 return;
         }
@@ -2161,8 +2162,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                         void *key)
 {
-        struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
-                                                        wait);
+        struct io_poll_iocb *poll = wait->private;
         struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
         struct io_ring_ctx *ctx = req->ctx;
         __poll_t mask = key_to_poll(key);
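
Note why the container_of() in io_poll_wake() had to go: it recovers a container from an embedded member by fixed-offset arithmetic, which is meaningless once the entry lives in its own kmalloc'ed allocation. The owner is instead stored in the entry's private field (set in io_poll_add() below). A small stand-in sketch contrasting the two idioms:

#include <stddef.h>

/* Kernel-style container_of(): fixed-offset arithmetic from an
 * embedded member back to its container. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry { void *private; };

struct poll_embedded {
        unsigned int events;
        struct wait_entry wait;         /* embedded: container_of() works */
};

struct poll_dynamic {
        unsigned int events;
        struct wait_entry *wait;        /* heap entry: needs a back-pointer */
};

static struct poll_embedded *owner_embedded(struct wait_entry *w)
{
        return container_of(w, struct poll_embedded, wait);
}

static struct poll_dynamic *owner_dynamic(struct wait_entry *w)
{
        return w->private;              /* mirrors poll->wait->private = poll */
}

int main(void)
{
        struct poll_embedded e = { .events = 1 };
        struct wait_entry w;
        struct poll_dynamic d = { .events = 2, .wait = &w };

        w.private = &d;                 /* the back-pointer the patch adds */
        return (owner_embedded(&e.wait) == &e &&
                owner_dynamic(d.wait) == &d) ? 0 : 1;
}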
@@ -2172,7 +2172,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
         if (mask && !(mask & poll->events))
                 return 0;
 
-        list_del_init(&poll->wait.entry);
+        list_del_init(&poll->wait->entry);
 
         /*
          * Run completion inline if we can. We're using trylock here because
@@ -2213,7 +2213,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 
         pt->error = 0;
         pt->req->poll.head = head;
-        add_wait_queue(head, &pt->req->poll.wait);
+        add_wait_queue(head, pt->req->poll.wait);
 }
 
 static void io_poll_req_insert(struct io_kiocb *req)
@@ -2252,6 +2252,10 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
         if (!poll->file)
                 return -EBADF;
 
+        poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
+        if (!poll->wait)
+                return -ENOMEM;
+
         req->sqe = NULL;
         INIT_IO_WORK(&req->work, io_poll_complete_work);
         events = READ_ONCE(sqe->poll_events);
@@ -2268,8 +2272,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
         ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
         /* initialized the list so that we can do list_empty checks */
-        INIT_LIST_HEAD(&poll->wait.entry);
-        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+        INIT_LIST_HEAD(&poll->wait->entry);
+        init_waitqueue_func_entry(poll->wait, io_poll_wake);
+        poll->wait->private = poll;
 
         INIT_LIST_HEAD(&req->list);
@@ -2278,14 +2283,14 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
         spin_lock_irq(&ctx->completion_lock);
         if (likely(poll->head)) {
                 spin_lock(&poll->head->lock);
-                if (unlikely(list_empty(&poll->wait.entry))) {
+                if (unlikely(list_empty(&poll->wait->entry))) {
                         if (ipt.error)
                                 cancel = true;
                         ipt.error = 0;
                         mask = 0;
                 }
                 if (mask || ipt.error)
-                        list_del_init(&poll->wait.entry);
+                        list_del_init(&poll->wait->entry);
                 else if (cancel)
                         WRITE_ONCE(poll->canceled, true);
                 else if (!poll->done) /* actually waiting for an event */