提交 844cf850 编写于 作者: J Jens Axboe 提交者: Joseph Qi

io-wq: have io_wq_create() take a 'data' argument

commit 576a347b7af8abfbddc80783fb6629c2894d036e upstream.

We currently pass in 4 arguments outside of the bounded size. In
preparation for adding one more argument, let's bundle them up in
a struct to make it more readable.

No functional changes in this patch.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Acked-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
上级 212ecbde
...@@ -973,9 +973,7 @@ void io_wq_flush(struct io_wq *wq) ...@@ -973,9 +973,7 @@ void io_wq_flush(struct io_wq *wq)
} }
} }
struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm, struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
struct user_struct *user, get_work_fn *get_work,
put_work_fn *put_work)
{ {
int ret = -ENOMEM, i, node; int ret = -ENOMEM, i, node;
struct io_wq *wq; struct io_wq *wq;
...@@ -991,11 +989,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm, ...@@ -991,11 +989,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
wq->get_work = get_work; wq->get_work = data->get_work;
wq->put_work = put_work; wq->put_work = data->put_work;
/* caller must already hold a reference to this */ /* caller must already hold a reference to this */
wq->user = user; wq->user = data->user;
i = 0; i = 0;
refcount_set(&wq->refs, wq->nr_wqes); refcount_set(&wq->refs, wq->nr_wqes);
...@@ -1009,7 +1007,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm, ...@@ -1009,7 +1007,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
wqe->node = node; wqe->node = node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
if (user) { if (wq->user) {
wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC); task_rlimit(current, RLIMIT_NPROC);
} }
...@@ -1032,7 +1030,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm, ...@@ -1032,7 +1030,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
goto err; goto err;
/* caller must have already done mmgrab() on this mm */ /* caller must have already done mmgrab() on this mm */
wq->mm = mm; wq->mm = data->mm;
wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager"); wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
if (!IS_ERR(wq->manager)) { if (!IS_ERR(wq->manager)) {
......
...@@ -38,9 +38,15 @@ struct io_wq_work { ...@@ -38,9 +38,15 @@ struct io_wq_work {
typedef void (get_work_fn)(struct io_wq_work *); typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *); typedef void (put_work_fn)(struct io_wq_work *);
struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm, struct io_wq_data {
struct user_struct *user, struct mm_struct *mm;
get_work_fn *get_work, put_work_fn *put_work); struct user_struct *user;
get_work_fn *get_work;
put_work_fn *put_work;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq); void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
......
...@@ -3842,6 +3842,7 @@ static void io_get_work(struct io_wq_work *work) ...@@ -3842,6 +3842,7 @@ static void io_get_work(struct io_wq_work *work)
static int io_sq_offload_start(struct io_ring_ctx *ctx, static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p) struct io_uring_params *p)
{ {
struct io_wq_data data;
unsigned concurrency; unsigned concurrency;
int ret; int ret;
...@@ -3886,10 +3887,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, ...@@ -3886,10 +3887,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err; goto err;
} }
data.mm = ctx->sqo_mm;
data.user = ctx->user;
data.get_work = io_get_work;
data.put_work = io_put_work;
/* Do QD, or 4 * CPUS, whatever is smallest */ /* Do QD, or 4 * CPUS, whatever is smallest */
concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user, ctx->io_wq = io_wq_create(concurrency, &data);
io_get_work, io_put_work);
if (IS_ERR(ctx->io_wq)) { if (IS_ERR(ctx->io_wq)) {
ret = PTR_ERR(ctx->io_wq); ret = PTR_ERR(ctx->io_wq);
ctx->io_wq = NULL; ctx->io_wq = NULL;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册