Commit 5e84ea3a authored by Jens Axboe

block: attempt to merge with existing requests on plug flush

One of the disadvantages of on-stack plugging is that we potentially
lose out on merging since all pending IO isn't always visible to
everybody. When we flush the on-stack plugs, right now we don't do
any checks to see if potential merge candidates could be utilized.

Correct this by adding a new insert variant, ELEVATOR_INSERT_SORT_MERGE.
It works just like ELEVATOR_INSERT_SORT, but first checks whether we can
merge with an existing request before doing the insertion; the sorted
insert is only done if the merge attempt fails.

This fixes a regression with multiple processes issuing IO that
can be merged.

Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing
an accounting bug.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Parent 4345caba
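For context before the diff: the flush path this patch touches runs when an on-stack plug is finished. Below is a minimal caller-side sketch, assuming the 2.6.39-era plugging API; submit_batch is a hypothetical wrapper for illustration, not part of the patch.

#include <linux/blkdev.h>

/*
 * Hypothetical caller (sketch only): requests submitted under the plug
 * accumulate on a per-task list and reach the elevator only when
 * blk_finish_plug() flushes the plug.
 */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* 2.6.39-era submit_bio(rw, bio) */
	blk_finish_plug(&plug);		/* flush: merge attempts happen here */
}

With this patch, each request drained at that flush is inserted with ELEVATOR_INSERT_SORT_MERGE instead of ELEVATOR_INSERT_SORT, as the first hunk below shows.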
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2685,7 +2685,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
 	}
 
 	if (q) {
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -465,3 +465,9 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 
 	return 0;
 }
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next)
+{
+	return attempt_merge(q, rq, next);
+}
--- a/block/blk.h
+++ b/block/blk.h
@@ -103,6 +103,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio);
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -521,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+				     struct request *rq)
+{
+	struct request *__rq;
+
+	if (blk_queue_nomerges(q))
+		return false;
+
+	/*
+	 * First try one-hit cache.
+	 */
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+		return true;
+
+	if (blk_queue_noxmerges(q))
+		return false;
+
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+	if (__rq && blk_attempt_req_merge(q, __rq, rq))
+		return true;
+
+	return false;
+}
+
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	struct elevator_queue *e = q->elevator;
@@ -538,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
+	const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-	if (e->ops->elevator_merge_req_fn)
+	if (next_sorted && e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
 
 	elv_rqhash_reposition(q, rq);
-	elv_rqhash_del(q, next);
 
+	if (next_sorted) {
+		elv_rqhash_del(q, next);
+		q->nr_sorted--;
+	}
+
 	q->last_merge = rq;
 }
@@ -647,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		__blk_run_queue(q, false);
 		break;
 
+	case ELEVATOR_INSERT_SORT_MERGE:
+		/*
+		 * If we succeed in merging this request with one in the
+		 * queue already, we are done - rq has now been freed,
+		 * so no need to do anything further.
+		 */
+		if (elv_attempt_insert_merge(q, rq))
+			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 		       !(rq->cmd_flags & REQ_DISCARD));
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -166,6 +166,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 #define ELEVATOR_INSERT_SORT		3
 #define ELEVATOR_INSERT_REQUEUE		4
 #define ELEVATOR_INSERT_FLUSH		5
+#define ELEVATOR_INSERT_SORT_MERGE	6
 
 /*
  * return values from elevator_may_queue_fn
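The net effect of the elv_insert() hunk is easiest to see as a condensed control-flow sketch. This is not the kernel source: insert, try_insert_merge, and sorted_insert below are simplified stand-ins, and only the two relevant cases are shown. The fallthrough from ELEVATOR_INSERT_SORT_MERGE to ELEVATOR_INSERT_SORT is deliberate: if no merge candidate is found, the request is simply sorted in as before.

#include <stdbool.h>
#include <stdio.h>

enum {
	ELEVATOR_INSERT_SORT		= 3,	/* values from elevator.h */
	ELEVATOR_INSERT_SORT_MERGE	= 6,
};

struct request_queue { bool has_merge_candidate; };
struct request { int dummy; };

/* stand-in for elv_attempt_insert_merge(): pretend the lookup ran */
static bool try_insert_merge(struct request_queue *q, struct request *rq)
{
	return q->has_merge_candidate;
}

/* stand-in for the normal sorted (rbtree) insert path */
static void sorted_insert(struct request_queue *q, struct request *rq)
{
	printf("sorted insert\n");
}

static void insert(struct request_queue *q, struct request *rq, int where)
{
	switch (where) {
	case ELEVATOR_INSERT_SORT_MERGE:
		/* merged: rq was folded into an existing request and freed */
		if (try_insert_merge(q, rq))
			break;
		/* fall through: no candidate, do a normal sorted insert */
	case ELEVATOR_INSERT_SORT:
		sorted_insert(q, rq);
		break;
	}
}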