Commit 3eea3213 authored by Jens Axboe, committed by Caspar Zhang

blk-mq: separate number of hardware queues from nr_cpu_ids

to #28991349

commit 392546aed22009060911f76b6ea24520e2f8b50f upstream

With multiple maps, nr_cpu_ids is no longer the maximum number of
hardware queues we support on a given device. The initializer of
the tag_set may have set ->nr_hw_queues larger than the available
number of CPUs, since that limit can be exceeded with multiple
queue maps (see the illustrative sketch after the commit metadata).
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 2ca20c32
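As a concrete illustration of the commit message above, here is a minimal, hypothetical sketch of a driver configuring a blk_mq_tag_set with more than one queue map, so that ->nr_hw_queues legitimately exceeds nr_cpu_ids. It is not part of this patch: the function my_driver_setup_tag_set, the ops pointer my_mq_ops, the parameter nr_extra_queues, and the chosen queue_depth are all invented for illustration; the struct fields and blk_mq_alloc_tag_set() are the kernel API this change touches.

/*
 * Hypothetical sketch, not part of this patch: a driver registering a
 * tag_set with two queue maps, where the total number of hardware queues
 * may exceed nr_cpu_ids.
 */
#include <linux/blk-mq.h>
#include <linux/cpumask.h>
#include <linux/numa.h>
#include <linux/string.h>

static int my_driver_setup_tag_set(struct blk_mq_tag_set *set,
                                   const struct blk_mq_ops *my_mq_ops,
                                   unsigned int nr_extra_queues)
{
        memset(set, 0, sizeof(*set));
        set->ops     = my_mq_ops;   /* expected to provide ->map_queues when nr_maps > 1 */
        set->nr_maps = 2;           /* e.g. a default map plus a second queue class */
        /*
         * With multiple maps the device can expose up to one queue per CPU
         * per map, so nr_hw_queues may be larger than nr_cpu_ids.
         */
        set->nr_hw_queues = num_possible_cpus() + nr_extra_queues;
        set->queue_depth  = 128;
        set->numa_node    = NUMA_NO_NODE;
        set->flags        = BLK_MQ_F_SHOULD_MERGE;

        /*
         * After this patch, blk_mq_alloc_tag_set() sizes set->tags with
         * nr_hw_queues(set) == max(set->nr_hw_queues, nr_cpu_ids) instead of
         * unconditionally capping the allocation at nr_cpu_ids.
         */
        return blk_mq_alloc_tag_set(set);
}

For single-map sets the new nr_hw_queues() helper still returns nr_cpu_ids, so their per-queue arrays keep the previous size; only multi-map sets grow the allocation to max(set->nr_hw_queues, nr_cpu_ids).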
@@ -2731,6 +2731,19 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	mutex_unlock(&q->sysfs_lock);
 }
 
+/*
+ * Maximum number of hardware queues we support. For single sets, we'll never
+ * have more than the CPUs (software queues). For multiple sets, the tag_set
+ * user may have set ->nr_hw_queues larger.
+ */
+static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
+{
+	if (set->nr_maps == 1)
+		return nr_cpu_ids;
+
+	return max(set->nr_hw_queues, nr_cpu_ids);
+}
+
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		struct request_queue *q)
 {
@@ -2749,7 +2762,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/* init q->mq_kobj and sw queues' kobjects */
 	blk_mq_sysfs_init(q);
 
-	q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
+	q->nr_queues = nr_hw_queues(set);
+	q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
 			GFP_KERNEL, set->numa_node);
 	if (!q->queue_hw_ctx)
 		goto err_sys_init;
@@ -2761,7 +2775,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
-	q->nr_queues = nr_cpu_ids;
 	q->tag_set = set;
 
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
@@ -2952,12 +2965,13 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		set->queue_depth = min(64U, set->queue_depth);
 	}
 	/*
-	 * There is no use for more h/w queues than cpus.
+	 * There is no use for more h/w queues than cpus if we just have
+	 * a single map
 	 */
-	if (set->nr_hw_queues > nr_cpu_ids)
+	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
 		set->nr_hw_queues = nr_cpu_ids;
 
-	set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
+	set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
 				 GFP_KERNEL, set->numa_node);
 	if (!set->tags)
 		return -ENOMEM;
@@ -3000,7 +3014,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i, j;
 
-	for (i = 0; i < nr_cpu_ids; i++)
+	for (i = 0; i < nr_hw_queues(set); i++)
 		blk_mq_free_map_and_requests(set, i);
 
 	for (j = 0; j < set->nr_maps; j++) {
@@ -3134,7 +3148,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 
 	lockdep_assert_held(&set->tag_list_lock);
 
-	if (nr_hw_queues > nr_cpu_ids)
+	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
 		nr_hw_queues = nr_cpu_ids;
 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
 		return;
...