Commit 7c058cfc authored by Jens Axboe, committed by Caspar Zhang

blk-mq: kill q->mq_map

to #28991349

commit a8908939af569ce2419f43fd56eeaf003bc3d85d upstream

It's just a pointer to set->mq_map, use that instead. Move the
assignment a bit earlier, so we always know it's valid.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 292e47ef
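The refactor is easy to model outside the kernel. Below is a minimal standalone C sketch, using simplified stand-in types rather than the real blk-mq structures, of the indirection this commit switches to: the per-queue mq_map alias is dropped, and the CPU-to-hardware-queue lookup reaches the shared map through q->tag_set instead.

/*
 * Standalone sketch with simplified stand-in types (NOT the kernel's
 * structures): before this commit, request_queue carried its own mq_map
 * pointer that merely aliased set->mq_map; after it, lookups go through
 * q->tag_set.
 */
#include <assert.h>
#include <stdio.h>

struct tag_set {
	unsigned int *mq_map;		/* cpu -> hw queue index, owned by the set */
};

struct request_queue {
	struct tag_set *tag_set;	/* assigned early, so always valid here */
	/* unsigned int *mq_map;	   the duplicated alias this commit kills */
};

/* New-style lookup: one extra indirection through the tag set. */
static unsigned int map_queue(const struct request_queue *q, int cpu)
{
	return q->tag_set->mq_map[cpu];
}

int main(void)
{
	unsigned int map[4] = { 0, 0, 1, 1 };	/* 4 CPUs, 2 hw queues */
	struct tag_set set = { .mq_map = map };
	struct request_queue q = { .tag_set = &set };

	/* CPU 2 maps to hw queue 1, exactly as q->mq_map[2] used to say. */
	assert(map_queue(&q, 2) == 1);
	printf("cpu 2 -> hctx %u\n", map_queue(&q, 2));
	return 0;
}

Since the queue never owned the map, dropping the alias removes one pointer to keep in sync for no extra cost beyond a single dereference in the lookup path.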
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2392,7 +2392,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = q->mq_map[i];
+		hctx_idx = set->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2402,7 +2402,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			q->mq_map[i] = 0;
+			set->mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2500,8 +2500,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 				     struct request_queue *q)
 {
-	q->tag_set = set;
-
 	mutex_lock(&set->tag_list_lock);
 
 	/*
@@ -2566,8 +2564,6 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	q->mq_map = NULL;
-
 	kfree(q->queue_hw_ctx);
 
 	/*
@@ -2652,7 +2648,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(q->mq_map, i);
+		node = blk_mq_hw_queue_to_node(set->mq_map, i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2728,8 +2724,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_hw_ctx)
 		goto err_sys_init;
 
-	q->mq_map = set->mq_map;
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -2738,6 +2732,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
+	q->tag_set = set;
 
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 	if (q->mq_ops->poll)
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -81,7 +81,9 @@ extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 						     int cpu)
 {
-	return q->queue_hw_ctx[q->mq_map[cpu]];
+	struct blk_mq_tag_set *set = q->tag_set;
+
+	return q->queue_hw_ctx[set->mq_map[cpu]];
 }
 
 /*
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -483,8 +483,6 @@ struct request_queue {
 
 	const struct blk_mq_ops		*mq_ops;
 
-	unsigned int			*mq_map;
-
 	/* sw queues */
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int			nr_queues;
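Note the ordering in blk_mq_init_allocated_queue() above: q->tag_set is now assigned directly during queue initialization rather than later in blk_mq_add_queue_tag_set(), so it is already valid by the time anything can call blk_mq_map_queue(). That is the "move the assignment a bit earlier" from the commit message, and it is what lets the inline helper in blk-mq.h dereference q->tag_set unconditionally.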