diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index a253e2ad22ebd017f12e369210aafed7965dfc07..f28f0a7d127240dc65be01a666f1d3048b761249 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -193,12 +193,12 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
-void init_hash_table(struct io_hash_bucket *hash_table, unsigned size)
+void init_hash_table(struct io_hash_table *table, unsigned size)
 {
 	unsigned int i;
 
 	for (i = 0; i < size; i++) {
-		spin_lock_init(&hash_table[i].lock);
-		INIT_HLIST_HEAD(&hash_table[i].list);
+		spin_lock_init(&table->hbs[i].lock);
+		INIT_HLIST_HEAD(&table->hbs[i].list);
 	}
 }
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 556a7dcf160e47e45d077530866cc6c2ed100ac1..fd4cb1a2595de5171aeb95ed9a41620d19fb3245 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -4,9 +4,4 @@
 int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
 int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
-void init_hash_table(struct io_hash_bucket *hash_table, unsigned size);
-
-struct io_hash_bucket {
-	spinlock_t		lock;
-	struct hlist_head	list;
-} ____cacheline_aligned_in_smp;
+void init_hash_table(struct io_hash_table *table, unsigned size);
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index f941c73f550259bb0ef4d7fd1c43b76978db32c7..344e7d90d55756d19c07130aed20c3a7581d6eb6 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -158,8 +158,8 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	mutex_unlock(&ctx->uring_lock);
 
 	seq_puts(m, "PollList:\n");
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
+		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
 		struct io_kiocb *req;
 
 		spin_lock(&hb->lock);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 06772139b7dad883a070ba21d95ccf3db2036f8e..0b3851a0db2ba145ebb473d477a4838ebbbe32b8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -241,11 +241,23 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	percpu_ref_put(&ctx->refs);
 }
 
+static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
+{
+	unsigned hash_buckets = 1U << bits;
+	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
+
+	table->hbs = kmalloc(hash_size, GFP_KERNEL);
+	if (!table->hbs)
+		return -ENOMEM;
+
+	table->hash_bits = bits;
+	init_hash_table(table, hash_buckets);
+	return 0;
+}
+
 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
 	struct io_ring_ctx *ctx;
-	unsigned hash_buckets;
-	size_t hash_size;
 	int hash_bits;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -261,16 +273,9 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	 */
 	hash_bits = ilog2(p->cq_entries) - 5;
 	hash_bits = clamp(hash_bits, 1, 8);
-	hash_buckets = 1U << hash_bits;
-	hash_size = hash_buckets * sizeof(struct io_hash_bucket);
-
-	ctx->cancel_hash_bits = hash_bits;
-	ctx->cancel_hash = kmalloc(hash_size, GFP_KERNEL);
-	if (!ctx->cancel_hash)
+	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
 		goto err;
-
-	init_hash_table(ctx->cancel_hash, hash_buckets);
 
 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
 	if (!ctx->dummy_ubuf)
 		goto err;
@@ -311,7 +316,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
-	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
@@ -2487,7 +2492,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_req_caches_free(ctx);
 	if (ctx->hash_map)
 		io_wq_put_hash(ctx->hash_map);
-	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
index 8b00243abf65fa0a2ef578aa3bde7e530461f349..d3b9bde9c702c62c9ec3ceb3f1629a2b25e1db54 100644
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -9,6 +9,16 @@
 #include "io-wq.h"
 #include "filetable.h"
 
+struct io_hash_bucket {
+	spinlock_t		lock;
+	struct hlist_head	list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+	struct io_hash_bucket	*hbs;
+	unsigned		hash_bits;
+};
+
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
 	u32 tail ____cacheline_aligned_in_smp;
@@ -224,8 +234,7 @@ struct io_ring_ctx {
 		 * manipulate the list, hence no extra locking is needed there.
 		 */
 		struct io_wq_work_list	iopoll_list;
-		struct io_hash_bucket	*cancel_hash;
-		unsigned		cancel_hash_bits;
+		struct io_hash_table	cancel_table;
 		bool			poll_multi_queue;
 
 		struct list_head	io_buffers_comp;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 96199c999fe60c39f732aaae4fe10c574c9e5ea3..ea6466388ed9efd139f3ae25deed6aee9f0fe2f9 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -73,9 +73,9 @@ static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 
 static void io_poll_req_insert(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	spin_lock(&hb->lock);
 	hlist_add_head(&req->hash_node, &hb->list);
@@ -84,8 +84,9 @@ static void io_poll_req_insert(struct io_kiocb *req)
 
 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
 {
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	spinlock_t *lock = &table->hbs[index].lock;
 
 	spin_lock(lock);
 	hash_del(&req->hash_node);
@@ -539,13 +540,15 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			       bool cancel_all)
 {
+	struct io_hash_table *table = &ctx->cancel_table;
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
 	bool found = false;
 	int i;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
@@ -562,12 +565,12 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				     struct io_cancel_data *cd,
-				     struct io_hash_bucket hash_table[],
+				     struct io_hash_table *table,
 				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
-	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &hash_table[index];
+	u32 index = hash_long(cd->data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	*out_bucket = NULL;
 
@@ -591,16 +594,17 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 					  struct io_cancel_data *cd,
-					  struct io_hash_bucket hash_table[],
+					  struct io_hash_table *table,
 					  struct io_hash_bucket **out_bucket)
 {
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct io_kiocb *req;
 	int i;
 
 	*out_bucket = NULL;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &hash_table[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry(req, &hb->list, hash_node) {
@@ -628,15 +632,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
 }
 
 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
-			    struct io_hash_bucket hash_table[])
+			    struct io_hash_table *table)
 {
 	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_file_find(ctx, cd, table, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_find(ctx, false, cd, table, &bucket);
 
 	if (req)
 		io_poll_cancel_req(req);
@@ -647,7 +651,7 @@ static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
-	return __io_poll_cancel(ctx, cd, ctx->cancel_hash);
+	return __io_poll_cancel(ctx, cd, &ctx->cancel_table);
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -745,7 +749,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd, ctx->cancel_hash, &bucket);
+	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
 	if (preq)
 		ret2 = io_poll_disarm(preq);
 	if (bucket)