Commit 38453a53 authored by Jens Axboe, committed by Joseph Qi

io_uring: extend batch freeing to cover more cases

to #26323588

commit c6ca97b30c47c7ad36107d3764bb4dc37026d171 upstream.

Currently we only batch free if fixed files are used, no links, no aux
data, etc. This extends the batch freeing to only exclude the linked
case and fallback case, and make io_free_req_many() handle the other
cases just fine.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Acked-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent 479df09a
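Before the diff itself, here is a minimal, standalone C sketch of the pattern this patch settles on: completed requests are queued into a req_batch, a per-batch need_iter counter records whether any queued request still needs per-request teardown, and the bulk-free routine only walks the batch when that counter is non-zero. All names in the sketch (req, req_batch, F_LINK, F_FIXED_FILE, multi_free(), free_many(), BATCH_SIZE) are simplified stand-ins for illustration and are not the kernel's types or APIs.

/* Standalone illustration of the batch-freeing pattern; simplified
 * stand-in types and names, not the kernel's io_uring API. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 8

enum { F_LINK = 1, F_FIXED_FILE = 2 };

struct req {
	unsigned flags;
	int id;
};

struct req_batch {
	struct req *reqs[BATCH_SIZE];
	int to_free;
	int need_iter;	/* non-zero if any queued req needs per-req teardown */
};

static void free_many(struct req_batch *rb)
{
	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		/* slow path: walk the batch once for extra per-request work */
		for (int i = 0; i < rb->to_free; i++)
			printf("extra teardown for req %d\n", rb->reqs[i]->id);
	}
	/* common path: release everything in one sweep */
	for (int i = 0; i < rb->to_free; i++)
		free(rb->reqs[i]);
	rb->to_free = rb->need_iter = 0;
}

static bool multi_free(struct req_batch *rb, struct req *req)
{
	/* only linked (and, in the kernel, fallback) requests are excluded */
	if (req->flags & F_LINK)
		return false;

	/* anything needing per-request work flips the slow path on */
	if (!(req->flags & F_FIXED_FILE))
		rb->need_iter++;

	rb->reqs[rb->to_free++] = req;
	if (rb->to_free == BATCH_SIZE)
		free_many(rb);
	return true;
}

int main(void)
{
	struct req_batch rb = { .to_free = 0, .need_iter = 0 };

	for (int i = 0; i < 20; i++) {
		struct req *req = calloc(1, sizeof(*req));
		req->id = i;
		req->flags = (i % 3 == 0) ? F_FIXED_FILE :
			     (i % 5 == 0) ? F_LINK : 0;
		if (!multi_free(&rb, req))
			free(req);	/* not batchable: freed individually */
	}
	free_many(&rb);	/* flush whatever is still queued */
	return 0;
}

The actual kernel code in the diff below additionally drops REQ_F_INFLIGHT requests from the inflight list under inflight_lock and releases the batch with kmem_cache_free_bulk() plus percpu_ref_put_many().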
@@ -1132,21 +1132,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-struct req_batch {
-	void *reqs[IO_IOPOLL_BATCH];
-	int to_free;
-};
-
-static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
-{
-	if (!rb->to_free)
-		return;
-	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
-	percpu_ref_put_many(&ctx->refs, rb->to_free);
-	percpu_ref_put_many(&ctx->file_data->refs, rb->to_free);
-	rb->to_free = 0;
-}
-
 static void __io_req_do_free(struct io_kiocb *req)
 {
 	if (likely(!io_is_fallback_req(req)))
@@ -1155,7 +1140,7 @@ static void __io_req_do_free(struct io_kiocb *req)
 		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
 }
 
-static void __io_free_req(struct io_kiocb *req)
+static void __io_req_aux_free(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1167,7 +1152,14 @@ static void __io_free_req(struct io_kiocb *req)
 		else
 			fput(req->file);
 	}
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+	__io_req_aux_free(req);
+
 	if (req->flags & REQ_F_INFLIGHT) {
+		struct io_ring_ctx *ctx = req->ctx;
 		unsigned long flags;
 
 		spin_lock_irqsave(&ctx->inflight_lock, flags);
@@ -1181,6 +1173,56 @@ static void __io_free_req(struct io_kiocb *req)
 	__io_req_do_free(req);
 }
 
+struct req_batch {
+	void *reqs[IO_IOPOLL_BATCH];
+	int to_free;
+	int need_iter;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+{
+	if (!rb->to_free)
+		return;
+	if (rb->need_iter) {
+		int i, inflight = 0;
+		unsigned long flags;
+
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req->flags & REQ_F_FIXED_FILE)
+				req->file = NULL;
+			if (req->flags & REQ_F_INFLIGHT)
+				inflight++;
+			else
+				rb->reqs[i] = NULL;
+			__io_req_aux_free(req);
+		}
+		if (!inflight)
+			goto do_free;
+
+		spin_lock_irqsave(&ctx->inflight_lock, flags);
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req) {
+				list_del(&req->inflight_entry);
+				if (!--inflight)
+					break;
+			}
+		}
+		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+
+		if (waitqueue_active(&ctx->inflight_wait))
+			wake_up(&ctx->inflight_wait);
+	}
+do_free:
+	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+	percpu_ref_put_many(&ctx->refs, rb->to_free);
+	percpu_ref_put_many(&ctx->file_data->refs, rb->to_free);
+	rb->to_free = rb->need_iter = 0;
+}
+
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1378,20 +1420,16 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 
 static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
-	/*
-	 * If we're not using fixed files, we have to pair the completion part
-	 * with the file put. Use regular completions for those, only batch
-	 * free for fixed file and non-linked commands.
-	 */
-	if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == REQ_F_FIXED_FILE)
-	    && !io_is_fallback_req(req) && !req->io) {
-		rb->reqs[rb->to_free++] = req;
-		if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
-			io_free_req_many(req->ctx, rb);
-		return true;
-	}
+	if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+		return false;
 
-	return false;
+	if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+		rb->need_iter++;
+
+	rb->reqs[rb->to_free++] = req;
+	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+		io_free_req_many(req->ctx, rb);
+	return true;
 }
 
 /*
@@ -1403,7 +1441,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	struct req_batch rb;
 	struct io_kiocb *req;
 
-	rb.to_free = 0;
+	rb.to_free = rb.need_iter = 0;
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, list);
 		list_del(&req->list);
@@ -3223,7 +3261,7 @@ static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
 	struct io_kiocb *req, *tmp;
 	struct req_batch rb;
 
-	rb.to_free = 0;
+	rb.to_free = rb.need_iter = 0;
 	spin_lock_irq(&ctx->completion_lock);
 	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
 		hash_del(&req->hash_node);