Commit 2ca20c32 authored by Jens Axboe, committed by Caspar Zhang

blk-mq: support multiple hctx maps

to #28991349

commit b3c661b15d5ab11d982e58bee23e05c1780528a1 upstream

Add support for the tag set carrying multiple queue maps, and
for the driver to inform blk-mq how many it wishes to support
through setting set->nr_maps.

This adds an mq_ops helper for drivers that support more than 1
map, mq_ops->rq_flags_to_type(). The function takes request/bio
flags and CPU, and returns a queue map index for that. We then
use the type information in blk_mq_map_queue() to index the map
set.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 37567ba0
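To illustrate the driver side of this interface, here is a minimal sketch, not taken from the patch itself: a hypothetical driver declares two queue maps and steers high-priority requests to the second map through ->rq_flags_to_type(). All example_* names are invented, and REQ_HIPRI is assumed to be available in this tree; a real driver must at least provide .queue_rq and would normally supply .map_queues to fill both maps.

#include <linux/blk-mq.h>

/* Hypothetical map indices used by this example driver. */
enum { EXAMPLE_MAP_DEFAULT = 0, EXAMPLE_MAP_POLL = 1 };

/* Pick a queue map index from the request/bio flags blk-mq passes in. */
static int example_rq_flags_to_type(struct request_queue *q, unsigned int flags)
{
	if (flags & REQ_HIPRI)	/* assumes REQ_HIPRI exists in this tree */
		return EXAMPLE_MAP_POLL;
	return EXAMPLE_MAP_DEFAULT;
}

static const struct blk_mq_ops example_mq_ops = {
	/* .queue_rq, .map_queues, etc. omitted; a real driver needs .queue_rq */
	.rq_flags_to_type	= example_rq_flags_to_type,
};

static int example_setup_tag_set(struct blk_mq_tag_set *set)
{
	set->ops		= &example_mq_ops;
	set->nr_maps		= 2;	/* declare both queue maps to blk-mq */
	set->nr_hw_queues	= num_online_cpus();
	set->queue_depth	= 64;
	set->numa_node		= NUMA_NO_NODE;
	return blk_mq_alloc_tag_set(set);
}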
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2328,7 +2328,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 static void blk_mq_init_cpu_queues(struct request_queue *q,
 				   unsigned int nr_hw_queues)
 {
-	unsigned int i;
+	struct blk_mq_tag_set *set = q->tag_set;
+	unsigned int i, j;
 
 	for_each_possible_cpu(i) {
 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2343,10 +2344,12 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		 * Set local node, IFF we have more than one hw queue. If
 		 * not, we remain on the home node of the device
 		 */
-		hctx = blk_mq_map_queue_type(q, 0, i);
-		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
-			hctx->numa_node = local_memory_node(cpu_to_node(i));
+		for (j = 0; j < set->nr_maps; j++) {
+			hctx = blk_mq_map_queue_type(q, j, i);
+			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+				hctx->numa_node = local_memory_node(cpu_to_node(i));
+		}
 	}
 }
 
 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
@@ -2380,7 +2383,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i, hctx_idx;
+	unsigned int i, j, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2416,9 +2419,19 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
-		hctx = blk_mq_map_queue_type(q, 0, i);
-		hctx->type = 0;
+		for (j = 0; j < set->nr_maps; j++) {
+			hctx = blk_mq_map_queue_type(q, j, i);
+
+			/*
+			 * If the CPU is already set in the mask, then we've
+			 * mapped this one already. This can happen if
+			 * devices share queues across queue maps.
+			 */
+			if (cpumask_test_cpu(i, hctx->cpumask))
+				continue;
+
 			cpumask_set_cpu(i, hctx->cpumask);
+			hctx->type = j;
 			ctx->index_hw[hctx->type] = hctx->nr_ctx;
 			hctx->ctxs[hctx->nr_ctx++] = ctx;
 
@@ -2428,6 +2441,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 */
 			BUG_ON(!hctx->nr_ctx);
 		}
+	}
 
 	mutex_unlock(&q->sysfs_lock);
 
@@ -2866,6 +2880,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
 	if (set->ops->map_queues) {
+		int i;
+
 		/*
 		 * transport .map_queues is usually done in the following
 		 * way:
@@ -2873,18 +2889,21 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		 * 	mask = get_cpu_mask(queue)
 		 * 	for_each_cpu(cpu, mask)
-		 * 		set->map.mq_map[cpu] = queue;
+		 * 		set->map[x].mq_map[cpu] = queue;
 		 * }
 		 *
 		 * When we need to remap, the table has to be cleared for
 		 * killing stale mapping since one CPU may not be mapped
 		 * to any hw queue.
 		 */
-		blk_mq_clear_mq_map(&set->map[0]);
+		for (i = 0; i < set->nr_maps; i++)
+			blk_mq_clear_mq_map(&set->map[i]);
 
 		return set->ops->map_queues(set);
-	} else
+	} else {
+		BUG_ON(set->nr_maps > 1);
 		return blk_mq_map_queues(&set->map[0]);
+	}
 }
 
 /*
@@ -2895,7 +2914,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-	int ret;
+	int i, ret;
 
 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
 
@@ -2918,6 +2937,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		set->queue_depth = BLK_MQ_MAX_DEPTH;
 	}
 
+	if (!set->nr_maps)
+		set->nr_maps = 1;
+	else if (set->nr_maps > HCTX_MAX_TYPES)
+		return -EINVAL;
+
 	/*
 	 * If a crashdump is active, then we are potentially in a very
 	 * memory constrained environment. Limit us to 1 queue and
@@ -2939,12 +2963,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
-	set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
-					  sizeof(*set->map[0].mq_map),
-					  GFP_KERNEL, set->numa_node);
-	if (!set->map[0].mq_map)
-		goto out_free_tags;
-	set->map[0].nr_queues = set->nr_hw_queues;
+	for (i = 0; i < set->nr_maps; i++) {
+		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
+						  sizeof(struct blk_mq_queue_map),
+						  GFP_KERNEL, set->numa_node);
+		if (!set->map[i].mq_map)
+			goto out_free_mq_map;
+		set->map[i].nr_queues = set->nr_hw_queues;
+	}
 
 	ret = blk_mq_update_queue_map(set);
 	if (ret)
@@ -2960,9 +2986,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_map:
-	kfree(set->map[0].mq_map);
-	set->map[0].mq_map = NULL;
-out_free_tags:
+	for (i = 0; i < set->nr_maps; i++) {
+		kfree(set->map[i].mq_map);
+		set->map[i].mq_map = NULL;
+	}
 	kfree(set->tags);
 	set->tags = NULL;
 	return ret;
@@ -2971,13 +2998,15 @@ EXPORT_SYMBOL(blk_mq_alloc_tag_set);
 
 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
-	int i;
+	int i, j;
 
 	for (i = 0; i < nr_cpu_ids; i++)
 		blk_mq_free_map_and_requests(set, i);
 
-	kfree(set->map[0].mq_map);
-	set->map[0].mq_map = NULL;
+	for (j = 0; j < set->nr_maps; j++) {
+		kfree(set->map[j].mq_map);
+		set->map[j].mq_map = NULL;
+	}
 
 	kfree(set->tags);
 	set->tags = NULL;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -78,20 +78,37 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  */
 extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-						     unsigned int flags,
+/*
+ * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
+ * @q: request queue
+ * @hctx_type: the hctx type index
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
+							  unsigned int hctx_type,
 							  unsigned int cpu)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
+	return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
 }
 
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-							  unsigned int hctx_type,
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
 						     unsigned int cpu)
 {
-	return blk_mq_map_queue(q, hctx_type, cpu);
+	int hctx_type = 0;
+
+	if (q->mq_ops->rq_flags_to_type)
+		hctx_type = q->mq_ops->rq_flags_to_type(q, flags);
+
+	return blk_mq_map_queue_type(q, hctx_type, cpu);
 }
 
 /*
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -90,7 +90,14 @@ enum {
 };
 
 struct blk_mq_tag_set {
+	/*
+	 * map[] holds ctx -> hctx mappings, one map exists for each type
+	 * that the driver wishes to support. There are no restrictions
+	 * on maps being of the same size, and it's perfectly legal to
+	 * share maps between types.
+	 */
 	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
+	unsigned int		nr_maps;	/* nr entries in map[] */
 	const struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
 	unsigned int		queue_depth;	/* max hw supported */
@@ -122,6 +129,8 @@ struct blk_mq_queue_data {
 typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
 		const struct blk_mq_queue_data *);
 typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
+/* takes rq->cmd_flags as input, returns a hardware type index */
+typedef int (rq_flags_to_type_fn)(struct request_queue *, unsigned int);
 typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
 typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
@@ -154,6 +163,10 @@ struct blk_mq_ops {
 	 * would have done).
 	 */
 	commit_rqs_fn		*commit_rqs;
 
+	/*
+	 * Return a queue map type for the given request/bio flags
+	 */
+	rq_flags_to_type_fn	*rq_flags_to_type;
+
 	/*
 	 * Reserve budget before queue request, once .queue_rq is
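For a concrete picture of the map[] layout described in the struct blk_mq_tag_set comment above, here is one hypothetical arrangement, with illustrative values that are not taken from the patch: a 4-CPU machine exposing four regular hardware queues plus two polling queues, so nr_hw_queues is 6 across both maps.

/*
 * Illustrative only: one possible layout of two queue maps on a 4-CPU
 * system with six hardware queues in total (hctx 0-3 regular, 4-5 poll).
 * mq_map[cpu] holds an index into q->queue_hw_ctx[], which spans all
 * hardware queues across every map.
 */
static void example_fill_maps(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	set->map[0].nr_queues = 4;	/* regular I/O map */
	set->map[1].nr_queues = 2;	/* poll map */

	for (cpu = 0; cpu < 4; cpu++) {
		/* map 0: one dedicated hctx per CPU */
		set->map[0].mq_map[cpu] = cpu;
		/* map 1: hctx 4 serves CPUs 0-1, hctx 5 serves CPUs 2-3 */
		set->map[1].mq_map[cpu] = 4 + cpu / 2;
	}
}

Nothing requires the maps to be the same size, and both maps may even point at the same hctx; that sharing is why blk_mq_map_swqueue() now skips a CPU that is already set in an hctx's cpumask.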