Commit 6cd8d26c authored by Jens Axboe, committed by Cheng Jian

io_uring: defer file table grabbing request cleanup for locked requests

mainline inclusion
from mainline-5.9-rc1
commit 51a4cc11
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

If we're in the error path failing links and we have a link that has
grabbed a reference to the fs_struct, then we cannot safely drop our
reference to the table if we already hold the completion lock. This
adds a hardirq dependency to the fs_struct->lock, which it currently
doesn't have.

Defer the final cleanup and free of such requests to avoid adding this
dependency.
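
For illustration, a minimal sketch of the deferral pattern (struct my_req, my_deferred_fs_put() and my_free_req() are hypothetical names; only the task_work calls mirror the API usage in the patch): instead of dropping the fs_struct reference while the completion lock is held, queue a callback that runs later in process context, where taking fs_struct->lock adds no hardirq dependency.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/task_work.h>

struct my_req {
	struct callback_head	task_work;	/* node queued on the owning task */
	struct task_struct	*task;		/* task that submitted the request */
};

/* Runs later in process context with no completion lock held, so taking a
 * non-irq-safe lock such as fs_struct->lock is safe here. */
static void my_deferred_fs_put(struct callback_head *cb)
{
	struct my_req *req = container_of(cb, struct my_req, task_work);

	/* drop the fs_struct reference here (hypothetical placeholder) */
	(void)req;
}

static void my_free_req(struct my_req *req)
{
	/* Called while the completion lock may be held: queue the cleanup
	 * instead of taking fs_struct->lock under it. */
	init_task_work(&req->task_work, my_deferred_fs_put);
	if (task_work_add(req->task, &req->task_work, TWA_RESUME)) {
		/* Task is exiting; the real patch falls back to queueing the
		 * work on the io-wq manager task instead. */
	}
}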

Reported-by: syzbot+ef4b654b49ed7ff049bf@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflicts:
	fs/io_uring.c
[adopting patch ecfc5177 ("io_uring: fix potential use after free on
fallback request free") led to this conflict]
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent a72d1bb7
@@ -1167,10 +1167,16 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_req_clean_work(struct io_kiocb *req)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_WORK_INITIALIZED))
-		return;
+		return false;
+
+	req->flags &= ~REQ_F_WORK_INITIALIZED;
 
 	if (req->work.mm) {
 		mmdrop(req->work.mm);
@@ -1183,6 +1189,9 @@ static void io_req_clean_work(struct io_kiocb *req)
 	if (req->work.fs) {
 		struct fs_struct *fs = req->work.fs;
 
+		if (req->flags & REQ_F_COMP_LOCKED)
+			return true;
+
 		spin_lock(&req->work.fs->lock);
 		if (--fs->users)
 			fs = NULL;
@@ -1191,7 +1200,8 @@ static void io_req_clean_work(struct io_kiocb *req)
 			free_fs_struct(fs);
 		req->work.fs = NULL;
 	}
-	req->flags &= ~REQ_F_WORK_INITIALIZED;
+
+	return false;
 }
 
 static void io_prep_async_work(struct io_kiocb *req)
@@ -1626,7 +1636,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 		fput(file);
 }
 
-static void io_dismantle_req(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
 {
 	io_clean_op(req);
 
@@ -1634,7 +1644,6 @@ static void io_dismantle_req(struct io_kiocb *req)
 		kfree(req->io);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	io_req_clean_work(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -1646,13 +1655,14 @@ static void io_dismantle_req(struct io_kiocb *req)
 			wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
+
+	return io_req_clean_work(req);
 }
 
-static void __io_free_req(struct io_kiocb *req)
+static void __io_free_req_finish(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	io_dismantle_req(req);
 	__io_put_req_task(req);
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -1661,6 +1671,39 @@ static void __io_free_req(struct io_kiocb *req)
 		percpu_ref_put(&ctx->refs);
 }
 
+static void io_req_task_file_table_put(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct fs_struct *fs = req->work.fs;
+
+	spin_lock(&req->work.fs->lock);
+	if (--fs->users)
+		fs = NULL;
+	spin_unlock(&req->work.fs->lock);
+	if (fs)
+		free_fs_struct(fs);
+	req->work.fs = NULL;
+
+	__io_free_req_finish(req);
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+	if (!io_dismantle_req(req)) {
+		__io_free_req_finish(req);
+	} else {
+		int ret;
+
+		init_task_work(&req->task_work, io_req_task_file_table_put);
+		ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+		if (unlikely(ret)) {
+			struct task_struct *tsk;
+
+			tsk = io_wq_get_task(req->ctx->io_wq);
+			task_work_add(tsk, &req->task_work, 0);
+		}
+	}
+}
+
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1954,7 +1997,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 		req->flags &= ~REQ_F_TASK_PINNED;
 	}
 
-	io_dismantle_req(req);
+	WARN_ON_ONCE(io_dismantle_req(req));
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
 		__io_req_free_batch_flush(req->ctx, rb);