Commit 7244851b authored by Jens Axboe, committed by Caspar Zhang

block: make blk_poll() take a parameter on whether to spin or not

commit 0a1b8b87d064a47fad9ec475316002da28559207 upstream.

blk_poll() has always kept spinning until it found an IO. This is
fine for SYNC polling, since we need to find one request we have
pending, but in preparation for ASYNC polling it can be beneficial
to just check if we have any entries available or not.

Existing callers are converted to pass in 'spin == true', to retain
the old behavior.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Caspar Zhang <caspar@linux.alibaba.com>
Parent d25f577c
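Before the diff, here is a minimal userspace sketch of the calling convention this patch introduces. It is not kernel code: struct request_queue, mock_poll_fn and the unsigned int cookie are simplified stand-ins for the real definitions, and the loop only models the behavior described above, where spin == true keeps polling until a completion is found and spin == false checks once and returns.

/*
 * Userspace model of blk_poll(q, cookie, spin) semantics -- not kernel code.
 * request_queue, poll_fn and the cookie type below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct request_queue {
        /* driver poll callback: >0 completions found, 0 none, <0 error */
        int (*poll_fn)(struct request_queue *q, unsigned int cookie, bool spin);
        int polls_until_done;   /* mock: a completion shows up after N checks */
};

static int mock_poll_fn(struct request_queue *q, unsigned int cookie, bool spin)
{
        (void)cookie;
        (void)spin;
        if (q->polls_until_done-- > 0)
                return 0;       /* nothing completed on this check */
        return 1;               /* one completion found */
}

/* Mirrors the poll loop after this patch: spin decides whether to keep going. */
static int blk_poll(struct request_queue *q, unsigned int cookie, bool spin)
{
        for (;;) {
                int ret = q->poll_fn(q, cookie, spin);

                if (ret > 0)
                        return ret;
                if (ret < 0 || !spin)
                        break;  /* async caller: just report "nothing yet" */
                /* the kernel does cpu_relax() here before checking again */
        }
        return 0;
}

int main(void)
{
        struct request_queue q = { .poll_fn = mock_poll_fn, .polls_until_done = 3 };

        /* existing callers pass spin == true and keep the old behavior */
        printf("sync  poll found %d completion(s)\n", blk_poll(&q, 0, true));

        q.polls_until_done = 3;
        /* an async poller would peek once and move on */
        printf("async poll found %d completion(s)\n", blk_poll(&q, 0, false));
        return 0;
}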
@@ -2578,19 +2578,22 @@ EXPORT_SYMBOL(submit_bio);
  * blk_poll - poll for IO completions
  * @q: the queue
  * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found.
+ *    completed entries found. If @spin is true, then blk_poll will continue
+ *    looping until at least one completion is found, unless the task is
+ *    otherwise marked running (or we need to reschedule).
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
         if (!q->poll_fn || !blk_qc_t_valid(cookie))
                 return 0;
 
         if (current->plug)
                 blk_flush_plug_list(current->plug, false);
 
-        return q->poll_fn(q, cookie);
+        return q->poll_fn(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
......
@@ -37,7 +37,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -3179,7 +3179,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
         return blk_mq_poll_hybrid_sleep(q, hctx, rq);
 }
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
         struct blk_mq_hw_ctx *hctx;
         long state;
@@ -3219,7 +3219,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
                 if (current->state == TASK_RUNNING)
                         return 1;
-                if (ret < 0)
+                if (ret < 0 || !spin)
                         break;
                 cpu_relax();
         }
......
@@ -217,7 +217,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
         return ret;
 }
 
-static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
+static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc, bool spin)
 {
         struct nvme_ns_head *head = q->queuedata;
         struct nvme_ns *ns;
@@ -227,7 +227,7 @@ static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
         srcu_idx = srcu_read_lock(&head->srcu);
         ns = srcu_dereference(head->current_path, &head->srcu);
         if (likely(ns && nvme_path_is_optimized(ns)))
-                found = ns->queue->poll_fn(q, qc);
+                found = ns->queue->poll_fn(q, qc, spin);
         srcu_read_unlock(&head->srcu, srcu_idx);
         return found;
 }
......
@@ -108,7 +108,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
         cookie = submit_bio(bio);
 
-        blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+        blk_poll(bdev_get_queue(req->ns->bdev), cookie, true);
 }
 
 static void nvmet_bdev_execute_flush(struct nvmet_req *req)
......
@@ -255,7 +255,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                 if (!READ_ONCE(bio.bi_private))
                         break;
                 if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                    !blk_poll(bdev_get_queue(bdev), qc))
+                    !blk_poll(bdev_get_queue(bdev), qc, true))
                         io_schedule();
         }
         __set_current_state(TASK_RUNNING);
@@ -425,7 +425,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                         break;
                 if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                    !blk_poll(bdev_get_queue(bdev), qc))
+                    !blk_poll(bdev_get_queue(bdev), qc, true))
                         io_schedule();
         }
         __set_current_state(TASK_RUNNING);
......
@@ -518,7 +518,7 @@ static struct bio *dio_await_one(struct dio *dio)
                 dio->waiter = current;
                 spin_unlock_irqrestore(&dio->bio_lock, flags);
                 if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                    !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
+                    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
                         io_schedule();
                 /* wake up sets us TASK_RUNNING */
                 spin_lock_irqsave(&dio->bio_lock, flags);
......
@@ -1963,7 +1963,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                         if (!(iocb->ki_flags & IOCB_HIPRI) ||
                             !dio->submit.last_queue ||
                             !blk_poll(dio->submit.last_queue,
-                                      dio->submit.cookie))
+                                      dio->submit.cookie, true))
                                 io_schedule();
                 }
                 __set_current_state(TASK_RUNNING);
......
@@ -317,7 +317,7 @@ struct blk_queue_ctx;
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t);
+typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t, bool spin);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
@@ -1033,7 +1033,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
......
@@ -409,7 +409,7 @@ int swap_readpage(struct page *page, bool synchronous)
                 if (!READ_ONCE(bio->bi_private))
                         break;
 
-                if (!blk_poll(disk->queue, qc))
+                if (!blk_poll(disk->queue, qc, true))
                         break;
         }
         __set_current_state(TASK_RUNNING);
......