Commit 107174b9 authored by Jens Axboe, committed by Caspar Zhang

blk-mq: cache request hardware queue mapping

to #28991349

commit ea4f995ee8b8f0578b3319949f2edd5d812fdb0a upstream

We call blk_mq_map_queue() a lot, at least two times for each
request per IO, sometimes more. Since we now have an indirect
call as well in that function, cache the mapping so we don't
have to re-call blk_mq_map_queue() for the same request
multiple times.
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 3eea3213
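The idea behind the patch: resolve the request-to-hardware-queue mapping once, when the request is initialized, store the result in the new request->mq_hctx field, and have every later call site read that cached pointer instead of re-running blk_mq_map_queue(). A minimal, self-contained C sketch of that pattern (simplified stand-in types, not the kernel's actual structures) might look like this:

```c
#include <stdio.h>

struct hw_ctx { int queue_num; };

struct req {
	unsigned int cmd_flags;
	int cpu;
	struct hw_ctx *mq_hctx;	/* cached hardware-queue mapping */
};

/* Stand-in for blk_mq_map_queue(): the lookup the commit avoids repeating. */
static struct hw_ctx *map_queue(struct hw_ctx *queues, unsigned int cmd_flags, int cpu)
{
	(void)cmd_flags;
	return &queues[cpu % 2];	/* toy mapping: CPU -> one of two queues */
}

int main(void)
{
	struct hw_ctx queues[2] = { { 0 }, { 1 } };
	struct req rq = { .cmd_flags = 0, .cpu = 3 };

	/* Init path (blk_mq_rq_ctx_init in the patch): look up once, cache it. */
	rq.mq_hctx = map_queue(queues, rq.cmd_flags, rq.cpu);

	/* Later call sites just dereference the cached pointer. */
	printf("request mapped to hw queue %d\n", rq.mq_hctx->queue_num);
	return 0;
}
```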
@@ -242,7 +242,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
 
-	hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
+	hctx = flush_rq->mq_hctx;
 	if (!q->elevator) {
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
@@ -345,15 +345,13 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * just for cheating put/get driver tag.
 	 */
 	if (q->mq_ops) {
-		struct blk_mq_hw_ctx *hctx;
-
 		flush_rq->mq_ctx = first_rq->mq_ctx;
+		flush_rq->mq_hctx = first_rq->mq_hctx;
 
 		if (!q->elevator) {
 			fq->orig_rq = first_rq;
 			flush_rq->tag = first_rq->tag;
-			hctx = blk_mq_map_queue(q, first_rq->cmd_flags, first_rq->mq_ctx->cpu);
-			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+			blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
 		} else {
 			flush_rq->internal_tag = first_rq->internal_tag;
 		}
@@ -414,13 +412,11 @@ static void flush_data_end_io(struct request *rq, blk_status_t error)
 static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
-
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
 		blk_mq_put_driver_tag_hctx(hctx, rq);
......
@@ -514,10 +514,8 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
 	const struct show_busy_params *params = data;
-	struct blk_mq_hw_ctx *hctx;
 
-	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-	if (hctx == params->hctx && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+	if (rq->mq_hctx == params->hctx && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
 		__blk_mq_debugfs_rq_show(params->m,
 					 list_entry_rq(&rq->queuelist));
 }
......
@@ -368,9 +368,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	/* flush rq in flush machinery need to be dispatched directly */
 	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -409,7 +407,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 
 	/* For list inserts, requests better be on the same hw queue */
 	rq = list_first_entry(list, struct request, queuelist);
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	hctx = rq->mq_hctx;
 
 	/*
 	 * blk_mq_sched_insert_requests() is called from flush plug
......
@@ -469,13 +469,10 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 u32 blk_mq_unique_tag(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
 	int hwq = 0;
 
-	if (q->mq_ops) {
-		hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
-		hwq = hctx->queue_num;
-	}
+	if (q->mq_ops)
+		hwq = rq->mq_hctx->queue_num;
 
 	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
......
@@ -329,6 +329,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
+	rq->mq_hctx = data->hctx;
 	rq->rq_flags = rq_flags;
 	rq->cpu = -1;
 	rq->cmd_flags = op;
@@ -515,9 +516,10 @@ void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	const int sched_tag = rq->internal_tag;
 
+	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
@@ -531,7 +533,7 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.mq.finish_request)
@@ -1020,7 +1022,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
+		.hctx = rq->mq_hctx,
 		.flags = BLK_MQ_REQ_NOWAIT,
 		.cmd_flags = rq->cmd_flags,
 	};
@@ -1186,7 +1188,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+		hctx = rq->mq_hctx;
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
 
@@ -1624,9 +1626,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  */
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-			ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
 	list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1838,9 +1838,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	int srcu_idx;
 	blk_qc_t unused_cookie;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-			ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	hctx_lock(hctx, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
@@ -1992,9 +1990,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 
 		if (same_queue_rq) {
-			data.hctx = blk_mq_map_queue(q,
-					same_queue_rq->cmd_flags,
-					same_queue_rq->mq_ctx->cpu);
+			data.hctx = same_queue_rq->mq_hctx;
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
 		}
......
@@ -231,13 +231,10 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx;
-
 	if (rq->tag == -1 || rq->internal_tag == -1)
 		return;
 
-	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-	__blk_mq_put_driver_tag(hctx, rq);
+	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
......
@@ -150,6 +150,7 @@ enum mq_rq_state {
 struct request {
 	struct request_queue *q;
 	struct blk_mq_ctx *mq_ctx;
+	struct blk_mq_hw_ctx *mq_hctx;
 
 	int cpu;
 	unsigned int cmd_flags;		/* op and common flags */
......