提交 b3260e04 编写于 作者: J John Garry 提交者: Zheng Zengkai

blk-mq: Refactor and rename blk_mq_free_map_and_{requests->rqs}()

mainline inclusion
from mainline-v5.16-rc1
commit 645db34e
category: performance
bugzilla: 186917, https://gitee.com/openeuler/kernel/issues/I5N1S5
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=645db34e50501aac141713fb47a315e5202ff890

--------------------------------

Refactor blk_mq_free_map_and_requests() such that it can be used at many
sites at which the tag map and rqs are freed.

Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the
alloc equivalent.
Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/1633429419-228500-12-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflict: commit a846a8e6 ("blk-mq: don't free tags if the tag_set is
used by other device in queue initialztion") is already backported,
blk_mq_free_map_and_rqs() is moved to __blk_mq_update_nr_hw_queues()
instead of blk_mq_realloc_hw_ctxs().
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 625089f4
......@@ -674,8 +674,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (!new)
return -ENOMEM;
blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
blk_mq_free_rq_map(*tagsptr, set->flags);
blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
*tagsptr = new;
} else {
/*
......
......@@ -3016,15 +3016,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
return set->tags[hctx_idx];
}
static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags,
unsigned int hctx_idx)
{
unsigned int flags = set->flags;
if (set->tags && set->tags[hctx_idx]) {
blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
blk_mq_free_rq_map(set->tags[hctx_idx], flags);
set->tags[hctx_idx] = NULL;
if (tags) {
blk_mq_free_rqs(set, tags, hctx_idx);
blk_mq_free_rq_map(tags, flags);
}
}
......@@ -3105,8 +3105,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* fallback in case of a new remap fails
* allocation
*/
if (i && set->tags[i])
blk_mq_free_map_and_requests(set, i);
if (i && set->tags[i]) {
blk_mq_free_map_and_rqs(set, set->tags[i], i);
set->tags[i] = NULL;
}
hctx->tags = NULL;
continue;
......@@ -3533,8 +3535,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
out_unwind:
while (--i >= 0)
blk_mq_free_map_and_requests(set, i);
while (--i >= 0) {
blk_mq_free_map_and_rqs(set, set->tags[i], i);
set->tags[i] = NULL;
}
return -ENOMEM;
}
......@@ -3724,8 +3728,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
return 0;
out_free_mq_rq_maps:
for (i = 0; i < set->nr_hw_queues; i++)
blk_mq_free_map_and_requests(set, i);
for (i = 0; i < set->nr_hw_queues; i++) {
blk_mq_free_map_and_rqs(set, set->tags[i], i);
set->tags[i] = NULL;
}
out_free_mq_map:
for (i = 0; i < set->nr_maps; i++) {
kfree(set->map[i].mq_map);
......@@ -3741,8 +3747,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
int i, j;
for (i = 0; i < set->nr_hw_queues; i++)
blk_mq_free_map_and_requests(set, i);
for (i = 0; i < set->nr_hw_queues; i++) {
blk_mq_free_map_and_rqs(set, set->tags[i], i);
set->tags[i] = NULL;
}
if (blk_mq_is_sbitmap_shared(set->flags))
blk_mq_exit_shared_sbitmap(set);
......@@ -3932,7 +3940,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
nr_hw_queues, prev_nr_hw_queues);
for (; i < set->nr_hw_queues; i++)
blk_mq_free_map_and_requests(set, i);
blk_mq_free_map_and_rqs(set, set->tags[i], i);
set->nr_hw_queues = prev_nr_hw_queues;
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
......
......@@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags,
unsigned int hctx_idx);
/*
* Internal helpers for request insertion into sw queues
*/
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册