Commit 5d7041ee authored by Jens Axboe, committed by Caspar Zhang

block: sum requests in the plug structure

to #29139300

commit 5f0ed774ed2914decfd397569fface997532e94d upstream

This isn't exactly the same as the previous count, as it includes
requests for all devices. But that really doesn't matter: if we have
more than the threshold (16) queued up, flush it. It's not worth it
to have an expensive list loop for this.
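
To make the trade-off concrete, here is a minimal userspace sketch (simplified stand-in types and hypothetical names, not the kernel code itself): the plug keeps one counter summed over all devices, updated in O(1) as requests are plugged, rather than walking the plug list per queue:

  #include <stdio.h>

  #define BLK_MAX_REQUEST_COUNT 16  /* same threshold the kernel uses */

  /* Simplified stand-in for struct blk_plug: one counter for all devices. */
  struct plug_sketch {
          unsigned short rq_count;
  };

  static void add_rq_to_plug(struct plug_sketch *plug)
  {
          plug->rq_count++;  /* O(1); counts requests for *all* devices */
  }

  static int plug_needs_flush(const struct plug_sketch *plug)
  {
          return plug->rq_count >= BLK_MAX_REQUEST_COUNT;
  }

  int main(void)
  {
          struct plug_sketch plug = { .rq_count = 0 };

          for (int i = 0; i < 40; i++) {
                  add_rq_to_plug(&plug);
                  if (plug_needs_flush(&plug)) {
                          printf("flush after %d plugged requests\n",
                                 plug.rq_count);
                          plug.rq_count = 0;  /* as blk_mq_flush_plug_list() now does */
                  }
          }
          return 0;
  }

Requests for several devices can now trip the threshold together where the old per-queue walk would not have, which the message above argues is an acceptable price for dropping the O(n) loop.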

[Hongnan Li] Performance evaluation:
Results from fio (ioengine=io_uring, iodepth=256), randread IOPS before vs. after this patch; nomerges is the value of /sys/block/<dev>/queue/nomerges. A hypothetical reproduction command follows the table.

bs      IOPS(randread nomerges=0)   IOPS(randread nomerges=2)
           before / after                before / after
-----  ---------------------------  ---------------------------
512         818K / 840K                   855K / 897K
1k          816K / 842K                   853K / 898K
2k          820K / 839K                   850K / 899K
4k          818K / 840K                   852K / 895K
8k          574K / 574K                   574K / 574K
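
For reference, one hypothetical way to reproduce a cell of this table (ioengine, iodepth, randread and the block sizes come from the rows above; the device name and runtime are assumptions):

  # 0 = merging enabled, 2 = all merging disabled (the two table columns)
  echo 2 > /sys/block/nvme0n1/queue/nomerges
  fio --name=randread --filename=/dev/nvme0n1 --direct=1 \
      --ioengine=io_uring --iodepth=256 --rw=randread --bs=512 \
      --runtime=30 --time_based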
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Hongnan Li <hongnan.li@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 867d8967
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1882,7 +1882,6 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
- * @request_count: out parameter for number of traversed plugged requests
  * @same_queue_rq: pointer to &struct request that gets filled in when
  *              another request associated with @q is found on the plug list
  *              (optional, may be %NULL)
@@ -1901,7 +1900,6 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count,
                            struct request **same_queue_rq)
 {
        struct blk_plug *plug;
@@ -1911,7 +1909,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        plug = current->plug;
        if (!plug)
                return false;
-       *request_count = 0;
 
        if (q->mq_ops)
                plug_list = &plug->mq_list;
@@ -1921,14 +1918,12 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                bool merged = false;
 
-               if (rq->q == q) {
-                       (*request_count)++;
+               if (rq->q == q && same_queue_rq) {
                        /*
                         * Only blk-mq multiple hardware queues case checks the
                         * rq in the same queue, there should be only one such
                         * rq in a queue
                         **/
-                       if (same_queue_rq)
-                               *same_queue_rq = rq;
+                       *same_queue_rq = rq;
                }
 
@@ -1956,30 +1951,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        return false;
 }
 
-unsigned int blk_plug_queued_count(struct request_queue *q)
-{
-       struct blk_plug *plug;
-       struct request *rq;
-       struct list_head *plug_list;
-       unsigned int ret = 0;
-
-       plug = current->plug;
-       if (!plug)
-               goto out;
-
-       if (q->mq_ops)
-               plug_list = &plug->mq_list;
-       else
-               plug_list = &plug->list;
-
-       list_for_each_entry(rq, plug_list, queuelist) {
-               if (rq->q == q)
-                       ret++;
-       }
-out:
-       return ret;
-}
-
 void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
        if (bio->bi_opf & REQ_RAHEAD)
@@ -2000,7 +1971,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
        struct blk_plug *plug;
        int where = ELEVATOR_INSERT_SORT;
        struct request *req, *free;
-       unsigned int request_count = 0;
 
        /*
         * low level driver can indicate that it wants pages above a
@@ -2025,10 +1995,9 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         * any locks.
         */
        if (!blk_queue_nomerges(q)) {
-               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+               if (blk_attempt_plug_merge(q, bio, NULL))
                        return BLK_QC_T_NONE;
-       } else
-               request_count = blk_plug_queued_count(q);
+       }
 
        spin_lock_irq(q->queue_lock);
@@ -2092,6 +2061,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
        plug = current->plug;
        if (plug) {
+               unsigned int request_count = plug->rq_count;
+
                /*
                 * If this is the first request added after a plug, fire
                 * of a plug trace.
@@ -3623,6 +3594,7 @@ void blk_start_plug(struct blk_plug *plug)
        INIT_LIST_HEAD(&plug->list);
        INIT_LIST_HEAD(&plug->mq_list);
        INIT_LIST_HEAD(&plug->cb_list);
+       plug->rq_count = 0;
        plug->multiple_queues = false;
        /*
         * Store ordering should not be needed here, since a potential
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1689,6 +1689,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        if (plug->multiple_queues)
                list_sort(NULL, &list, plug_rq_cmp);
 
+       plug->rq_count = 0;
        this_q = NULL;
        this_hctx = NULL;
        this_ctx = NULL;
@@ -1890,6 +1891,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 {
        list_add_tail(&rq->queuelist, &plug->mq_list);
+       plug->rq_count++;
        if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
                struct request *tmp;
@@ -1906,7 +1908,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_mq_alloc_data data = { .flags = 0};
        struct request *rq;
-       unsigned int request_count = 0;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
@@ -1919,7 +1920,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                return BLK_QC_T_NONE;
 
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+           blk_attempt_plug_merge(q, bio, &same_queue_rq))
                return BLK_QC_T_NONE;
 
        if (blk_mq_sched_bio_merge(q, bio))
@@ -1955,20 +1956,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         * Use plugging if we have a ->commit_rqs() hook as well, as
         * we know the driver uses bd->last in a smart fashion.
         */
+               unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
 
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
 
-               /*
-                * @request_count may become stale because of schedule
-                * out, so check the list again.
-                */
-               if (list_empty(&plug->mq_list))
-                       request_count = 0;
-               else if (blk_queue_nomerges(q))
-                       request_count = blk_plug_queued_count(q);
-
                if (!request_count)
                        trace_block_plug(q);
                else
--- a/block/blk.h
+++ b/block/blk.h
@@ -188,9 +188,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count,
                            struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1374,6 +1374,7 @@ struct blk_plug {
        struct list_head list;          /* requests */
        struct list_head mq_list;       /* blk-mq requests */
        struct list_head cb_list;       /* md requires an unplug callback */
+       unsigned short rq_count;
        bool multiple_queues;
 };
 #define BLK_MAX_REQUEST_COUNT 16