diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 140933e4a7d12efd628695b10acf71ba63d5c01a..40905539afed347ebb7882d7e02c824c096e16ce 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -321,7 +321,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	bool ret = false;
 	enum hctx_type type;
 
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2089c6c62f44e4e1ffcccdfb542ed17e92f3a7a2..a4931fc7be8abf687646312244995a818056e9e3 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -170,7 +170,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 
 		data->ctx = blk_mq_get_ctx(data->q);
 		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
-						data->ctx->cpu);
+						data->ctx);
 		tags = blk_mq_tags_from_data(data);
 		if (data->flags & BLK_MQ_REQ_RESERVED)
 			bt = &tags->breserved_tags;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8f5b533764ca0c286ec0d34b3462d7e64e3713d9..445d0a2642ae079b2e3b5f9923ce7ee6010a69c4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -364,7 +364,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	}
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx->cpu);
+						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -2435,7 +2435,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 				continue;
 
 			hctx = blk_mq_map_queue_type(q, j, i);
-
+			ctx->hctxs[j] = hctx;
 			/*
 			 * If the CPU is already set in the mask, then we've
 			 * mapped this one already. This can happen if
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d943d46b078547e5f48d488575be5178fb0195e5..9fb06261518edaff9fea25e52a40620550ed7656 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,6 +23,7 @@ struct blk_mq_ctx {
 
 	unsigned int		cpu;
 	unsigned short		index_hw[HCTX_MAX_TYPES];
+	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];
 
 	/* incremented at dispatch time */
 	unsigned long		rq_dispatched[2];
@@ -97,11 +98,11 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
  * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
  * @q: request queue
  * @flags: request command flags
- * @cpu: CPU
+ * @ctx: software queue
  */
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 						     unsigned int flags,
-						     unsigned int cpu)
+						     struct blk_mq_ctx *ctx)
 {
 	enum hctx_type type = HCTX_TYPE_DEFAULT;
 
@@ -116,7 +117,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		 q->tag_set->map[HCTX_TYPE_READ].nr_queues)
 		type = HCTX_TYPE_READ;
 
-	return blk_mq_map_queue_type(q, type, cpu);
+	return ctx->hctxs[type];
 }
 
 /*
diff --git a/block/blk.h b/block/blk.h
index 848278c520306819c39f61c49ffc841577f590dc..5d636ee416630e09602c394be8c6b8042a276672 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -38,7 +38,7 @@ extern struct ida blk_queue_ida;
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
+	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
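
Note (not part of the patch): below is a minimal userspace sketch of the pattern the change introduces, namely filling a per-software-context array of hardware-context pointers once when queues are mapped, so the per-request lookup becomes a single array index instead of recomputing the cpu-to-queue mapping. The names used here (sw_ctx, hw_ctx, map_swqueue, map_queue, map_queue_type) are illustrative stand-ins, not the real blk-mq symbols, and the mapping policy is a placeholder.

/*
 * Standalone sketch, not kernel code: cache the per-type hardware-context
 * pointer in each software context at map time, then look it up per request.
 */
#include <stdio.h>

enum hctx_type { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, TYPE_MAX };

struct hw_ctx {
	int queue_num;
};

struct sw_ctx {
	unsigned int cpu;
	struct hw_ctx *hctxs[TYPE_MAX];	/* mirrors the new blk_mq_ctx::hctxs[] */
};

/* Placeholder for the real cpu/type -> queue mapping (blk_mq_map_queue_type-like). */
static struct hw_ctx *map_queue_type(struct hw_ctx *hw, int nr_hw,
				     enum hctx_type type, unsigned int cpu)
{
	return &hw[(cpu + type) % nr_hw];
}

/* Analogue of blk_mq_map_swqueue(): compute the mapping once and cache it. */
static void map_swqueue(struct sw_ctx *ctx, struct hw_ctx *hw, int nr_hw)
{
	for (int t = 0; t < TYPE_MAX; t++)
		ctx->hctxs[t] = map_queue_type(hw, nr_hw, t, ctx->cpu);
}

/* Analogue of the new blk_mq_map_queue(): the hot path is just an index. */
static struct hw_ctx *map_queue(struct sw_ctx *ctx, enum hctx_type type)
{
	return ctx->hctxs[type];
}

int main(void)
{
	struct hw_ctx hw[2] = { { .queue_num = 0 }, { .queue_num = 1 } };
	struct sw_ctx ctx = { .cpu = 3 };

	map_swqueue(&ctx, hw, 2);
	printf("default type maps to hw queue %d\n",
	       map_queue(&ctx, TYPE_DEFAULT)->queue_num);
	return 0;
}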