diff --git a/block/blk-flush.c b/block/blk-flush.c
index bf7e46ddb1123f83501ff9ccf787d3234454a646..21de869c2b4756501a5554771e5e7c21e78d3ade 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -242,7 +242,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
 
-	hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
+	hctx = flush_rq->mq_hctx;
 	if (!q->elevator) {
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
@@ -345,15 +345,13 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * just for cheating put/get driver tag.
 	 */
 	if (q->mq_ops) {
-		struct blk_mq_hw_ctx *hctx;
-
 		flush_rq->mq_ctx = first_rq->mq_ctx;
+		flush_rq->mq_hctx = first_rq->mq_hctx;
 
 		if (!q->elevator) {
 			fq->orig_rq = first_rq;
 			flush_rq->tag = first_rq->tag;
-			hctx = blk_mq_map_queue(q, first_rq->cmd_flags, first_rq->mq_ctx->cpu);
-			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+			blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
 		} else {
 			flush_rq->internal_tag = first_rq->internal_tag;
 		}
@@ -414,13 +412,11 @@ static void flush_data_end_io(struct request *rq, blk_status_t error)
 static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
-
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
 		blk_mq_put_driver_tag_hctx(hctx, rq);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 7698c4a62aaf54fe733fc282f489c19f1efb5614..c6db51075e8b3b04947d305d08c8d204f1696b2c 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -514,10 +514,8 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
 	const struct show_busy_params *params = data;
-	struct blk_mq_hw_ctx *hctx;
 
-	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-	if (hctx == params->hctx && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+	if (rq->mq_hctx == params->hctx && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
 		__blk_mq_debugfs_rq_show(params->m,
 					 list_entry_rq(&rq->queuelist));
 }
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 716dbee9d8bdc9b62c35f69db50725413d331553..7a8f019ac31159f21d19bbd5c9a80fd8bff4a538 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -368,9 +368,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	/* flush rq in flush machinery need to be dispatched directly */
 	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -409,7 +407,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 
 	/* For list inserts, requests better be on the same hw queue */
 	rq = list_first_entry(list, struct request, queuelist);
-	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	hctx = rq->mq_hctx;
 
 	/*
 	 * blk_mq_sched_insert_requests() is called from flush plug
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 30582f2f55536c0664de1791a1415a48af1e022b..b5a1b83804c9abf6657283f70e102ba0374a2f4a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -469,13 +469,10 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 u32 blk_mq_unique_tag(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
 	int hwq = 0;
 
-	if (q->mq_ops) {
-		hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
-		hwq = hctx->queue_num;
-	}
+	if (q->mq_ops)
+		hwq = rq->mq_hctx->queue_num;
 
 	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 45afd3015a56df3169c51bf0c1b6259128353071..e00c8a52563ed369dd86815b8e3fe6bcb40ca3ed 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -329,6 +329,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
+	rq->mq_hctx = data->hctx;
 	rq->rq_flags = rq_flags;
 	rq->cpu = -1;
 	rq->cmd_flags = op;
@@ -515,9 +516,10 @@ void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	const int sched_tag = rq->internal_tag;
 
+	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
@@ -531,7 +533,7 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.mq.finish_request)
@@ -1020,7 +1022,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
+		.hctx = rq->mq_hctx,
 		.flags = BLK_MQ_REQ_NOWAIT,
 		.cmd_flags = rq->cmd_flags,
 	};
@@ -1186,7 +1188,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+		hctx = rq->mq_hctx;
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
 
@@ -1624,9 +1626,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  */
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-			ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
 	list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1838,9 +1838,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	int srcu_idx;
 	blk_qc_t unused_cookie;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-			ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	hctx_lock(hctx, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
@@ -1992,9 +1990,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 
 		if (same_queue_rq) {
-			data.hctx = blk_mq_map_queue(q,
-					same_queue_rq->cmd_flags,
-					same_queue_rq->mq_ctx->cpu);
+			data.hctx = same_queue_rq->mq_hctx;
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
 		}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f6dfbe2bec9ae895c9446392c9789e6edc633232..3fc27338af3ae7de89b04c86b59ba3acdf251722 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -231,13 +231,10 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx;
-
 	if (rq->tag == -1 || rq->internal_tag == -1)
 		return;
 
-	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-	__blk_mq_put_driver_tag(hctx, rq);
+	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bb7ac9a11fe69e736e2a2452e0aa475d1f2ae4aa..18683d5bb5273ec0f9e1f3ab354ee4a6427a7c5c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -150,6 +150,7 @@ enum mq_rq_state {
 struct request {
	struct request_queue *q;
 	struct blk_mq_ctx *mq_ctx;
+	struct blk_mq_hw_ctx *mq_hctx;
 
 	int cpu;
 	unsigned int cmd_flags;		/* op and common flags */
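
The pattern throughout this patch is the same: blk_mq_rq_ctx_init() caches data->hctx in the new rq->mq_hctx field at allocation time, and every later user (driver-tag handling, the flush machinery, dispatch, debugfs) reads the cached pointer back instead of recomputing the CPU-to-hardware-queue mapping with blk_mq_map_queue(). Below is a minimal user-space sketch of that caching idea, for illustration only: struct toy_request, struct hw_ctx, map_queue() and init_request() are invented stand-ins, not kernel APIs.

/* Toy model (not kernel code) of caching the hw-queue pointer in the
 * request, so later users avoid repeating the mapping lookup. */
#include <stdio.h>

struct hw_ctx {
	int queue_num;
};

struct toy_request {
	int cpu;
	struct hw_ctx *mq_hctx;		/* cached at alloc, like rq->mq_hctx */
};

static struct hw_ctx hw_queues[2] = { { .queue_num = 0 }, { .queue_num = 1 } };

/* Stand-in for blk_mq_map_queue(): derive the hw queue from the CPU. */
static struct hw_ctx *map_queue(int cpu)
{
	return &hw_queues[cpu % 2];
}

/* Allocation-time init, mirroring blk_mq_rq_ctx_init(): do the mapping
 * lookup exactly once and remember the result in the request. */
static void init_request(struct toy_request *rq, int cpu)
{
	rq->cpu = cpu;
	rq->mq_hctx = map_queue(cpu);
}

int main(void)
{
	struct toy_request rq;

	init_request(&rq, 3);
	/* Every later user reads the cached pointer, no remapping needed. */
	printf("request on cpu %d uses hw queue %d\n",
	       rq.cpu, rq.mq_hctx->queue_num);
	return 0;
}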