Commit f749f220 authored by Pavel Begunkov, committed by Xiaoguang Wang

io-wq: hash dependent work

to #28170604

commit 60cf46ae605446feb0c43c472c0fd1af4cd96231 upstream

Enable io-wq hashing stuff for dependent works simply by re-enqueueing
such requests.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent 9fc5877e
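For background on the change below: io-wq lets a work item carry a hash so that items with the same hash are serialized (at most one of them runs at a time), and this patch makes a hashed work item that is produced as a dependent of a completed one go back onto the queue instead of being executed inline, so it is subject to the same per-hash serialization. What follows is a minimal, single-threaded user-space sketch of that idea only; every name in it (struct work, get_next_work, handle_work, hash_map, and so on) is an illustrative stand-in, not the kernel's io-wq API.

/*
 * Toy model: hashed work items are serialized per hash bucket, and a
 * hashed dependent item is re-enqueued rather than run inline.
 * Hash values are assumed to be < 64 (bits in hash_map).
 */
#include <stdio.h>

struct work {
	struct work *next;		/* queue linkage */
	struct work *dependent;		/* runs after this item completes */
	int hashed;			/* serialize with items of the same hash */
	unsigned int hash;		/* bucket, meaningful only if hashed */
	const char *name;
};

static struct work *queue_head, *queue_tail;
static unsigned long hash_map;		/* bit set => that bucket is running */

static void enqueue(struct work *w)
{
	w->next = NULL;
	if (queue_tail)
		queue_tail->next = w;
	else
		queue_head = w;
	queue_tail = w;
}

/* Pick the first queued item whose hash bucket is not already running. */
static struct work *get_next_work(void)
{
	struct work *w, *prev = NULL;

	for (w = queue_head; w; prev = w, w = w->next) {
		if (w->hashed && (hash_map & (1ul << w->hash)))
			continue;	/* an item with this hash is running */
		if (w->hashed)
			hash_map |= 1ul << w->hash;
		if (prev)
			prev->next = w->next;
		else
			queue_head = w->next;
		if (queue_tail == w)
			queue_tail = prev;
		return w;
	}
	return NULL;
}

static void handle_work(void)
{
	struct work *w;

	while ((w = get_next_work())) {
		printf("running %s (hash %u)\n", w->name, w->hash);

		struct work *dep = w->dependent;

		if (w->hashed)
			hash_map &= ~(1ul << w->hash);	/* bucket is free again */

		/*
		 * The point of the patch: a hashed dependent item is not run
		 * inline here, it is put back on the queue so that it goes
		 * through the same per-bucket check in get_next_work().
		 * (In io-wq a non-hashed dependent keeps running inline;
		 * this toy only models the hashed case.)
		 */
		if (dep && dep->hashed)
			enqueue(dep);
	}
}

int main(void)
{
	struct work b = { .hashed = 1, .hash = 3, .name = "dependent-B" };
	struct work a = { .dependent = &b, .hashed = 1, .hash = 3, .name = "A" };
	struct work c = { .hashed = 1, .hash = 3, .name = "C" };

	enqueue(&a);
	enqueue(&c);
	handle_work();
	return 0;
}

In this toy run the dependent of A is re-enqueued behind C, so the output order is A, C, dependent-B: the dependent item does not bypass the queue by running inline, which is the behavioural point of the patch.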
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -376,11 +376,17 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 	return __io_worker_unuse(wqe, worker);
 }
 
-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
+static inline unsigned int io_get_work_hash(struct io_wq_work *work)
+{
+	return work->flags >> IO_WQ_HASH_SHIFT;
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	__must_hold(wqe->lock)
 {
 	struct io_wq_work_node *node, *prev;
 	struct io_wq_work *work;
+	unsigned int hash;
 
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
@@ -392,9 +398,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
 		}
 
 		/* hashed, can run if not already running */
-		*hash = work->flags >> IO_WQ_HASH_SHIFT;
-		if (!(wqe->hash_map & BIT(*hash))) {
-			wqe->hash_map |= BIT(*hash);
+		hash = io_get_work_hash(work);
+		if (!(wqe->hash_map & BIT(hash))) {
+			wqe->hash_map |= BIT(hash);
 			wq_node_del(&wqe->work_list, node, prev);
 			return work;
 		}
@@ -471,15 +477,17 @@ static void io_assign_current_work(struct io_worker *worker,
 	spin_unlock_irq(&worker->lock);
 }
 
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
+
 static void io_worker_handle_work(struct io_worker *worker)
 	__releases(wqe->lock)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
-	unsigned hash = -1U;
 
 	do {
 		struct io_wq_work *work;
+		unsigned int hash;
 get_next:
 		/*
 		 * If we got some work, mark us as busy. If we didn't, but
@@ -488,7 +496,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 		 * can't make progress, any work completion or insertion will
 		 * clear the stalled flag.
 		 */
-		work = io_get_next_work(wqe, &hash);
+		work = io_get_next_work(wqe);
 		if (work)
 			__io_worker_busy(wqe, worker, work);
 		else if (!wq_list_empty(&wqe->work_list))
@@ -512,11 +520,16 @@ static void io_worker_handle_work(struct io_worker *worker)
 				work->flags |= IO_WQ_WORK_CANCEL;
 
 			old_work = work;
+			hash = io_get_work_hash(work);
 			work->func(&work);
 			work = (old_work == work) ? NULL : work;
 			io_assign_current_work(worker, work);
 			wq->free_work(old_work);
 
+			if (work && io_wq_is_hashed(work)) {
+				io_wqe_enqueue(wqe, work);
+				work = NULL;
+			}
			if (hash != -1U) {
 				spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);