diff --git a/fs/io-wq.c b/fs/io-wq.c
index 8cc58f02dc007a38ddae623c0c9729e19ba0394b..c3b6b79949baedebe99b30d64d434df6041c16cf 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -973,9 +973,7 @@ void io_wq_flush(struct io_wq *wq)
 	}
 }
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-			   struct user_struct *user, get_work_fn *get_work,
-			   put_work_fn *put_work)
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret = -ENOMEM, i, node;
 	struct io_wq *wq;
@@ -991,11 +989,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	wq->get_work = get_work;
-	wq->put_work = put_work;
+	wq->get_work = data->get_work;
+	wq->put_work = data->put_work;
 
 	/* caller must already hold a reference to this */
-	wq->user = user;
+	wq->user = data->user;
 
 	i = 0;
 	refcount_set(&wq->refs, wq->nr_wqes);
@@ -1009,7 +1007,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		wqe->node = node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
-		if (user) {
+		if (wq->user) {
 			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
 					task_rlimit(current, RLIMIT_NPROC);
 		}
@@ -1032,7 +1030,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		goto err;
 
 	/* caller must have already done mmgrab() on this mm */
-	wq->mm = mm;
+	wq->mm = data->mm;
 
 	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
 	if (!IS_ERR(wq->manager)) {
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 4b29f922f80cfc2b916374eedf788a8526b93861..8ba403c11327554422460c205c83f5db49db4054 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -38,9 +38,15 @@ struct io_wq_work {
 typedef void (get_work_fn)(struct io_wq_work *);
 typedef void (put_work_fn)(struct io_wq_work *);
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-				struct user_struct *user,
-				get_work_fn *get_work, put_work_fn *put_work);
+struct io_wq_data {
+	struct mm_struct *mm;
+	struct user_struct *user;
+
+	get_work_fn *get_work;
+	put_work_fn *put_work;
+};
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
 void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1819daed8e7452eb49285fdb5fe699a91197606a..279d630b83ece9427214ef5fa226eb331d05a2a9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3842,6 +3842,7 @@ static void io_get_work(struct io_wq_work *work)
 static int io_sq_offload_start(struct io_ring_ctx *ctx,
 			       struct io_uring_params *p)
 {
+	struct io_wq_data data;
 	unsigned concurrency;
 	int ret;
 
@@ -3886,10 +3887,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		goto err;
 	}
 
+	data.mm = ctx->sqo_mm;
+	data.user = ctx->user;
+	data.get_work = io_get_work;
+	data.put_work = io_put_work;
+
	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
-					io_get_work, io_put_work);
+	ctx->io_wq = io_wq_create(concurrency, &data);
 	if (IS_ERR(ctx->io_wq)) {
 		ret = PTR_ERR(ctx->io_wq);
 		ctx->io_wq = NULL;
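
Note: for readers outside the kernel tree, the shape of the API change is easy to see in a standalone sketch. The userspace analogue below is illustrative only; the void pointers stand in for the kernel's struct mm_struct * and struct user_struct *, and real error handling (ERR_PTR and friends) is omitted. It mirrors the new calling convention, where io_wq_create() takes one struct io_wq_data instead of four separate arguments:

	/* Illustrative userspace analogue of the patch; not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>

	struct io_wq_work { int id; };

	typedef void (get_work_fn)(struct io_wq_work *);
	typedef void (put_work_fn)(struct io_wq_work *);

	struct io_wq_data {
		void *mm;	/* stands in for struct mm_struct * */
		void *user;	/* stands in for struct user_struct * */
		get_work_fn *get_work;
		put_work_fn *put_work;
	};

	struct io_wq {
		void *mm;
		void *user;
		get_work_fn *get_work;
		put_work_fn *put_work;
	};

	/* new-style constructor: one data pointer instead of four arguments */
	static struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
	{
		struct io_wq *wq = calloc(1, sizeof(*wq));

		(void)bounded;
		if (!wq)
			return NULL;
		wq->get_work = data->get_work;
		wq->put_work = data->put_work;
		wq->user = data->user;
		wq->mm = data->mm;
		return wq;
	}

	static void io_get_work(struct io_wq_work *work) { printf("get %d\n", work->id); }
	static void io_put_work(struct io_wq_work *work) { printf("put %d\n", work->id); }

	int main(void)
	{
		/* caller fills in the data struct, as io_sq_offload_start() now does */
		struct io_wq_data data = {
			.mm = NULL,
			.user = NULL,
			.get_work = io_get_work,
			.put_work = io_put_work,
		};
		struct io_wq *wq = io_wq_create(4, &data);

		if (wq) {
			struct io_wq_work w = { .id = 1 };

			wq->get_work(&w);
			wq->put_work(&w);
			free(wq);
		}
		return 0;
	}

Bundling the constructor arguments this way lets later patches add setup fields to struct io_wq_data without touching the signature of io_wq_create() or every one of its callers.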