Commit c6ed5079 authored by Pavel Begunkov, committed by Xiaoguang Wang

io_uring: don't derive close state from ->func

to #28736503

commit 3af73b286ccee493dc055fc58da02b2dc7a5304d upstream

Relying on having a specific work.func is dangerous, even if an opcode
handler set it itself. E.g. io_wq_assign_next() can modify it.

io_close() sets a custom work.func to indicate that
__close_fd_get_file() was already called. Fortunately, there are no bugs
with io_wq_assign_next() and close yet.

Still, play it safe and always be prepared to be called through
io_wq_submit_work(). Zero req->close.put_file in prep, and call
__close_fd_get_file() IFF it's NULL.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 0885bdfb
@@ -3429,53 +3429,37 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	    req->close.fd == req->ctx->ring_fd)
 		return -EBADF;
 
+	req->close.put_file = NULL;
 	return 0;
 }
 
-/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req)
-{
-	int ret;
-
-	ret = filp_close(req->close.put_file, req->work.files);
-	if (ret < 0)
-		req_set_fail_links(req);
-	io_cqring_add_event(req, ret);
-	fput(req->close.put_file);
-	io_put_req(req);
-}
-
-static void io_close_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	/* not cancellable, don't do io_req_cancelled() */
-	__io_close_finish(req);
-	io_steal_work(req, workptr);
-}
-
 static int io_close(struct io_kiocb *req, bool force_nonblock)
 {
+	struct io_close *close = &req->close;
 	int ret;
 
-	req->close.put_file = NULL;
-	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
-	if (ret < 0)
-		return (ret == -ENOENT) ? -EBADF : ret;
+	/* might be already done during nonblock submission */
+	if (!close->put_file) {
+		ret = __close_fd_get_file(close->fd, &close->put_file);
+		if (ret < 0)
+			return (ret == -ENOENT) ? -EBADF : ret;
+	}
 
 	/* if the file has a flush method, be safe and punt to async */
-	if (req->close.put_file->f_op->flush && force_nonblock) {
+	if (close->put_file->f_op->flush && force_nonblock) {
 		/* avoid grabbing files - we don't need the files */
 		req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
-		req->work.func = io_close_finish;
 		return -EAGAIN;
 	}
 
-	/*
-	 * No ->flush(), safely close from here and just punt the
-	 * fput() to async context.
-	 */
-	__io_close_finish(req);
+	/* No ->flush() or already async, safely close from here */
+	ret = filp_close(close->put_file, req->work.files);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	fput(close->put_file);
+	close->put_file = NULL;
+	io_put_req(req);
 	return 0;
 }
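
The diff above is self-contained, but the pattern it adopts (deciding whether the prepare step already ran by looking at request data, i.e. put_file != NULL, rather than at which work.func was installed) can be illustrated with a minimal user-space C sketch. All names in it (fake_req, fake_close, get_file_for_close) are invented for this example; it only mimics the control flow of the patched io_close() and is not kernel code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the close-related request state:
 * put_file stays NULL until the fd has been detached. */
struct fake_req {
	int fd;
	FILE *put_file;
};

/* Stand-in for __close_fd_get_file(): "detach" the fd and hand back a file.
 * A scratch file keeps the example runnable. */
static int get_file_for_close(int fd, FILE **out)
{
	(void)fd;
	*out = tmpfile();
	return *out ? 0 : -EBADF;
}

/* Stand-in for the patched io_close(): it may run twice, first inline with
 * force_nonblock=true, then again from an async worker.  Whether the first
 * step already happened is derived from req->put_file, not from which work
 * function was installed. */
static int fake_close(struct fake_req *req, bool force_nonblock)
{
	int ret;

	/* might be already done during nonblock submission */
	if (!req->put_file) {
		ret = get_file_for_close(req->fd, &req->put_file);
		if (ret < 0)
			return ret;
	}

	/* pretend the file has a ->flush() method: punt to async,
	 * keeping put_file so the second call skips the lookup */
	if (force_nonblock)
		return -EAGAIN;

	ret = fclose(req->put_file) ? -EIO : 0;
	req->put_file = NULL;
	return ret;
}

int main(void)
{
	struct fake_req req = { .fd = 3, .put_file = NULL };

	if (fake_close(&req, true) == -EAGAIN)
		printf("punted to async, file already grabbed: %d\n",
		       req.put_file != NULL);
	printf("async completion returned %d\n", fake_close(&req, false));
	return 0;
}

Built with a plain cc invocation, the first call reports the punt with the file already grabbed and the second call completes with 0, regardless of how the "worker" was dispatched.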