Commit 3a94180c authored by Zhang Wensheng, committed by Zheng Zengkai

Revert "blk-mq: fix kabi broken by "blk-mq: Use request queue-wide tags for tagset-wide sbitmap""

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5N1S5
CVE: NA

--------------------------------

This reverts commit b97f541e.

The related fields will be modified by later patches backported
from mainline.

Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 51ddf58f
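For context, the reverted commit kept the kernel ABI stable by moving the two new sbitmap members out of struct request_queue and into a wrapper struct that embeds it, recovering the wrapper from a queue pointer via container_of(). Below is a minimal, self-contained userspace sketch of that wrapper pattern; the types are hypothetical stand-ins, not the kernel definitions:

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* container_of: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical simplified stand-ins for the kernel types. */
struct request_queue { int id; };
struct sbitmap_queue { unsigned int depth; };

/*
 * The KABI trick: new members live in a wrapper that embeds the
 * original struct, so the size and layout of struct request_queue
 * stay unchanged for existing binary consumers.
 */
struct request_queue_wrapper {
	struct request_queue q;	/* embedded first: alloc/free pass the wrapper */
	struct sbitmap_queue sched_bitmap_tags;
	struct sbitmap_queue sched_breserved_tags;
};

#define queue_to_wrapper(queue) \
	container_of(queue, struct request_queue_wrapper, q)

int main(void)
{
	/* Allocate the wrapper, hand out only &wrapper->q ... */
	struct request_queue_wrapper *w = calloc(1, sizeof(*w));
	struct request_queue *q = &w->q;

	/* ... and recover the wrapper wherever the new members are needed. */
	queue_to_wrapper(q)->sched_bitmap_tags.depth = 128;
	printf("depth=%u\n", w->sched_bitmap_tags.depth);

	free(queue_to_wrapper(q));
	return 0;
}
```

Because q is the first member, &wrapper->q and the wrapper share an address, but allocation and free must still be type-correct and pass the wrapper; that is exactly what the hunks below undo in blk_alloc_queue() and blk_free_queue_rcu().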
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -521,15 +521,13 @@ static void blk_timeout_work(struct work_struct *work)
 struct request_queue *blk_alloc_queue(int node_id)
 {
 	struct request_queue *q;
-	struct request_queue_wrapper *q_wrapper;
 	int ret;
 
-	q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep,
-				GFP_KERNEL | __GFP_ZERO, node_id);
-	if (!q_wrapper)
+	q = kmem_cache_alloc_node(blk_requestq_cachep,
+				GFP_KERNEL | __GFP_ZERO, node_id);
+	if (!q)
 		return NULL;
-	q = &q_wrapper->q;
 
 	q->last_merge = NULL;
 
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
@@ -600,7 +598,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
-	kmem_cache_free(blk_requestq_cachep, q_wrapper);
+	kmem_cache_free(blk_requestq_cachep, q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_alloc_queue);
@@ -1821,7 +1819,7 @@ int __init blk_dev_init(void)
 		panic("Failed to create kblockd\n");
 
 	blk_requestq_cachep = kmem_cache_create("request_queue",
-			sizeof(struct request_queue_wrapper), 0, SLAB_PANIC, NULL);
+			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
 	blk_debugfs_root = debugfs_create_dir("block", NULL);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
@@ -555,14 +555,13 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
 	struct blk_mq_hw_ctx *hctx;
 	int ret, i;
-	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue);
 
 	/*
 	 * Set initial depth at max so that we don't need to reallocate for
 	 * updating nr_requests.
 	 */
-	ret = blk_mq_init_bitmaps(&q_wrapper->sched_bitmap_tags,
-				  &q_wrapper->sched_breserved_tags,
+	ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
+				  &queue->sched_breserved_tags,
 				  MAX_SCHED_RQ, set->reserved_tags,
 				  set->numa_node, alloc_policy);
 	if (ret)
@@ -570,12 +569,12 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 
 	queue_for_each_hw_ctx(queue, hctx, i) {
 		hctx->sched_tags->bitmap_tags =
-				&q_wrapper->sched_bitmap_tags;
+				&queue->sched_bitmap_tags;
 		hctx->sched_tags->breserved_tags =
-				&q_wrapper->sched_breserved_tags;
+				&queue->sched_breserved_tags;
 	}
 
-	sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags,
+	sbitmap_queue_resize(&queue->sched_bitmap_tags,
 			     queue->nr_requests - set->reserved_tags);
 
 	return 0;
@@ -583,10 +582,8 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 
 static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
 {
-	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue);
-
-	sbitmap_queue_free(&q_wrapper->sched_bitmap_tags);
-	sbitmap_queue_free(&q_wrapper->sched_breserved_tags);
+	sbitmap_queue_free(&queue->sched_bitmap_tags);
+	sbitmap_queue_free(&queue->sched_breserved_tags);
 }
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -3746,7 +3746,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
 	int i, ret;
-	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q);
 
 	if (!set)
 		return -EINVAL;
@@ -3775,9 +3774,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 							  nr, true);
 		if (blk_mq_is_sbitmap_shared(set->flags)) {
 			hctx->sched_tags->bitmap_tags =
-				&q_wrapper->sched_bitmap_tags;
+				&q->sched_bitmap_tags;
 			hctx->sched_tags->breserved_tags =
-				&q_wrapper->sched_breserved_tags;
+				&q->sched_breserved_tags;
 		}
 	}
 	if (ret)
@@ -3788,7 +3787,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	if (!ret) {
 		q->nr_requests = nr;
 		if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
-			sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags,
+			sbitmap_queue_resize(&q->sched_bitmap_tags,
 					     nr - set->reserved_tags);
 	}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
@@ -726,7 +726,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 {
 	struct request_queue *q = container_of(rcu_head, struct request_queue,
 					       rcu_head);
-	kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q));
+	kmem_cache_free(blk_requestq_cachep, q);
 }
 
 /* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
diff --git a/block/blk.h b/block/blk.h
@@ -28,19 +28,6 @@ struct blk_flush_queue {
 	spinlock_t mq_flush_lock;
 };
 
-/*
- * The wrapper of request_queue to fix kabi while adding members.
- */
-struct request_queue_wrapper {
-	struct request_queue q;
-
-	struct sbitmap_queue sched_bitmap_tags;
-	struct sbitmap_queue sched_breserved_tags;
-};
-
-#define queue_to_wrapper(queue) \
-	container_of(queue, struct request_queue_wrapper, q)
-
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -499,6 +499,9 @@ struct request_queue {
 	atomic_t		nr_active_requests_shared_sbitmap;
 
+	struct sbitmap_queue	sched_bitmap_tags;
+	struct sbitmap_queue	sched_breserved_tags;
+
 	struct list_head	icq_list;
 #ifdef CONFIG_BLK_CGROUP
 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
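With the revert applied, the queue-wide scheduler tags live directly in struct request_queue again, and every hardware context's sched_tags points at that single pair, so one resize is visible to all of them. A minimal sketch of that sharing shape, using hypothetical simplified stand-ins for the kernel types:

```c
#include <stdio.h>

/* Hypothetical simplified stand-ins for the kernel types. */
struct sbitmap_queue { unsigned int depth; };

struct blk_mq_tags {
	struct sbitmap_queue *bitmap_tags;	/* points at the shared bitmap */
	struct sbitmap_queue *breserved_tags;
};

struct request_queue {
	unsigned int nr_requests;
	/* Post-revert: the queue-wide tags live in the queue itself. */
	struct sbitmap_queue sched_bitmap_tags;
	struct sbitmap_queue sched_breserved_tags;
};

#define NR_HW_QUEUES 4

int main(void)
{
	struct request_queue q = { .nr_requests = 256 };
	struct blk_mq_tags sched_tags[NR_HW_QUEUES];

	/*
	 * Every hardware context shares the one queue-wide sbitmap pair,
	 * mirroring the loop in blk_mq_init_sched_shared_sbitmap() above.
	 */
	for (int i = 0; i < NR_HW_QUEUES; i++) {
		sched_tags[i].bitmap_tags = &q.sched_bitmap_tags;
		sched_tags[i].breserved_tags = &q.sched_breserved_tags;
	}

	/* Resizing the shared bitmap once is seen by all hctxs. */
	q.sched_bitmap_tags.depth = q.nr_requests;
	printf("hctx0 depth=%u\n", sched_tags[0].bitmap_tags->depth);
	return 0;
}
```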