Commit 39e5e106 authored by Pavel Begunkov, committed by Xiaoguang Wang

io_uring: off timeouts based only on completions

to #28736503

commit bfe68a221905de37e65394a6d58c1e5f3e545d2f upstream

Offset timeouts currently wait not for sqe->off non-timeout CQEs, but for
sqe->off plus the number of requests that were already in flight when the
timeout was submitted. Make them wait for exactly sqe->off non-timeout
completions.
Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 58825d4e
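For context, the following is a minimal userspace sketch of the behaviour this patch corrects, assuming liburing is available; the queue depth, the offset of 2, and the 5-second timespec are illustrative values, not taken from the patch.

/* Sketch only: arm an offset timeout that completes after 2 other CQEs or 5s. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/*
	 * The third argument becomes sqe->off: complete after 2 non-timeout
	 * CQEs. Before this patch the kernel effectively waited for 2 plus the
	 * number of requests already in flight at submission time.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 2, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("timeout cqe res = %d\n", cqe->res);	/* -ETIME if the timer fired first */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}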
@@ -384,7 +384,8 @@ struct io_timeout {
 	struct file		*file;
 	u64			addr;
 	int			flags;
-	u32			count;
+	u32			off;
+	u32			target_seq;
 };
 
 struct io_rw {
@@ -1121,8 +1122,10 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
 			break;
-		if (__req_need_defer(req))
+		if (req->timeout.target_seq != ctx->cached_cq_tail
+					- atomic_read(&ctx->cq_timeouts))
 			break;
+
 		list_del_init(&req->list);
 		io_kill_timeout(req);
 	}
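The new flush check compares the number of non-timeout completions seen so far (cached_cq_tail minus cq_timeouts) against the target sequence precomputed at submission. The standalone sketch below (not kernel code; the names only mirror the patch, and the counter values are hypothetical) shows why plain u32 arithmetic keeps the equality test correct even when the counters wrap around.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical counter values, chosen so that the target wraps past zero. */
	uint32_t cached_cq_tail = UINT32_MAX;		/* CQEs posted so far */
	uint32_t cq_timeouts = 2;			/* timeout CQEs among them */
	uint32_t tail = cached_cq_tail - cq_timeouts;	/* non-timeout completions */

	uint32_t off = 5;				/* sqe->off of the timeout */
	uint32_t target_seq = tail + off;		/* wraps around to 2 */

	/* Simulate further non-timeout completions arriving one by one. */
	for (int i = 1; i <= 5; i++) {
		tail++;					/* also wraps past UINT32_MAX */
		if (tail == target_seq)			/* the equality io_flush_timeouts() tests */
			printf("timeout flushed after %d completions\n", i);
	}
	return 0;
}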
@@ -4585,20 +4588,8 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	 * We could be racing with timeout deletion. If the list is empty,
 	 * then timeout lookup already found it and will be handling it.
 	 */
-	if (!list_empty(&req->list)) {
-		struct io_kiocb *prev;
-
-		/*
-		 * Adjust the reqs sequence before the current one because it
-		 * will consume a slot in the cq_ring and the cq_tail
-		 * pointer will be increased, otherwise other timeout reqs may
-		 * return in advance without waiting for enough wait_nr.
-		 */
-		prev = req;
-		list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
-			prev->sequence++;
+	if (!list_empty(&req->list))
 		list_del_init(&req->list);
-	}
 
 	io_cqring_fill_event(req, -ETIME);
 	io_commit_cqring(ctx);
@@ -4690,7 +4681,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (flags & ~IORING_TIMEOUT_ABS)
 		return -EINVAL;
 
-	req->timeout.count = off;
+	req->timeout.off = off;
 
 	if (!req->io && io_alloc_async_ctx(req))
 		return -ENOMEM;
@@ -4714,13 +4705,10 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 static int io_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_timeout_data *data;
+	struct io_timeout_data *data = &req->io->timeout;
 	struct list_head *entry;
-	unsigned span = 0;
-	u32 count = req->timeout.count;
-	u32 seq = req->sequence;
 
-	data = &req->io->timeout;
+	u32 tail, off = req->timeout.off;
+
 	spin_lock_irq(&ctx->completion_lock);
 	/*
@@ -4728,13 +4716,14 @@ static int io_timeout(struct io_kiocb *req)
 	 * timeout event to be satisfied. If it isn't set, then this is
 	 * a pure timeout request, sequence isn't used.
 	 */
-	if (!count) {
+	if (!off) {
 		req->flags |= REQ_F_TIMEOUT_NOSEQ;
 		entry = ctx->timeout_list.prev;
 		goto add;
 	}
 
-	req->sequence = seq + count;
+	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	req->timeout.target_seq = tail + off;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -4742,39 +4731,13 @@ static int io_timeout(struct io_kiocb *req)
 	 */
 	list_for_each_prev(entry, &ctx->timeout_list) {
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-		unsigned nxt_seq;
-		long long tmp, tmp_nxt;
-		u32 nxt_offset = nxt->timeout.count;
 
 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
 			continue;
-
-		/*
-		 * Since seq + count can overflow, use type long
-		 * long to store it.
-		 */
-		tmp = (long long)seq + count;
-		nxt_seq = nxt->sequence - nxt_offset;
-		tmp_nxt = (long long)nxt_seq + nxt_offset;
-
-		/*
-		 * cached_sq_head may overflow, and it will never overflow twice
-		 * once there is some timeout req still be valid.
-		 */
-		if (seq < nxt_seq)
-			tmp += UINT_MAX;
-
-		if (tmp > tmp_nxt)
-			break;
-
-		/*
-		 * Sequence of reqs after the insert one and itself should
-		 * be adjusted because each timeout req consumes a slot.
-		 */
-		span++;
-		nxt->sequence++;
+		/* nxt.seq is behind @tail, otherwise would've been completed */
+		if (off >= nxt->timeout.target_seq - tail)
+			break;
 	}
-	req->sequence -= span;
 add:
 	list_add(&req->list, entry);
 	data->timer.function = io_timeout_fn;
...
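The rewritten insertion sort in io_timeout() orders pending timeouts by how many further non-timeout completions each still needs, using the same wrap-safe subtraction. Below is a small standalone sketch (hypothetical values, not kernel code) of the comparison off >= nxt->timeout.target_seq - tail that decides where the new request is linked.

#include <stdint.h>
#include <stdio.h>

/* Completions a queued timeout still needs, relative to the current tail. */
static uint32_t remaining(uint32_t target_seq, uint32_t tail)
{
	return target_seq - tail;	/* unsigned subtraction is wrap-safe */
}

int main(void)
{
	uint32_t tail = UINT32_MAX - 3;		/* non-timeout completions so far */
	uint32_t nxt_target = tail + 10;	/* queued timeout, target wrapped to 6 */
	uint32_t off = 4;			/* new request wants 4 more completions */

	/*
	 * Mirrors the patch's break condition: if the new timeout needs at least
	 * as many further completions as nxt, it is inserted after nxt;
	 * otherwise the scan keeps walking toward the head of the list.
	 */
	if (off >= remaining(nxt_target, tail))
		printf("link the new timeout after nxt\n");
	else
		printf("new timeout expires sooner, keep scanning\n");
	return 0;
}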