Commit fa72b903 authored by Tejun Heo, committed by Linus Torvalds

[PATCH] blk: remove blk_queue_tag->real_max_depth optimization

blk_queue_tag->real_max_depth was used to optimize out unnecessary
allocations/frees on tag resize.  However, the whole thing was very broken:
tag_map was never allocated to real_max_depth, resulting in accesses beyond the
end of the map, and bits in [max_depth..real_max_depth] were set when
initializing a map and copied when resizing, resulting in pre-occupied tags.

As the gain from the optimization is very small (almost nil), remove the
whole thing.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 2bf0fdad
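The core of the change is the map sizing in init_tag_map(): instead of allocating (depth / BLK_TAGS_PER_LONG) + 1 longs and pre-setting the bits in [depth..real_max_depth), the patch rounds depth up to a whole number of unsigned longs with ALIGN() and bounds every lookup by max_depth alone. A minimal userspace sketch of that arithmetic follows; the macro definitions and the example depth are stand-ins for illustration, not the kernel's code.

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BLK_TAGS_PER_LONG	BITS_PER_LONG
/* same rounding the kernel's ALIGN() performs for power-of-two alignments */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long depth = 64;	/* example tag depth */

	/* old sizing: always one extra long, so real_max_depth > depth */
	unsigned long bits = depth / BLK_TAGS_PER_LONG + 1;
	printf("old: %lu longs, real_max_depth = %lu\n",
	       bits, (unsigned long)(bits * BITS_PER_LONG));

	/* new sizing: just enough longs to cover max_depth = depth */
	unsigned long nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
	printf("new: %lu longs for max_depth = %lu\n", nr_ulongs, depth);

	return 0;
}

With depth = 64 the old scheme allocates two longs and reports real_max_depth = 128, while the new scheme allocates exactly one long and max_depth remains the only bound checked anywhere.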
@@ -717,7 +717,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -775,9 +775,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
 static int
 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 {
-	int bits, i;
 	struct request **tag_index;
 	unsigned long *tag_map;
+	int nr_ulongs;
 
 	if (depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
@@ -789,24 +789,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	if (!tag_index)
 		goto fail;
 
-	bits = (depth / BLK_TAGS_PER_LONG) + 1;
-	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, bits * sizeof(unsigned long));
+	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->max_depth = depth;
-	tags->real_max_depth = bits * BITS_PER_LONG;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
 
-	/*
-	 * set the upper bits if the depth isn't a multiple of the word size
-	 */
-	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
-		__set_bit(i, tag_map);
-
 	return 0;
 fail:
 	kfree(tag_index);
@@ -871,32 +864,24 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct request **tag_index;
 	unsigned long *tag_map;
-	int bits, max_depth;
+	int max_depth, nr_ulongs;
 
 	if (!bqt)
 		return -ENXIO;
 
-	/*
-	 * don't bother sizing down
-	 */
-	if (new_depth <= bqt->real_max_depth) {
-		bqt->max_depth = new_depth;
-		return 0;
-	}
-
 	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->real_max_depth;
+	max_depth = bqt->max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
 
 	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-	bits = max_depth / BLK_TAGS_PER_LONG;
-	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+	nr_ulongs = ALIGN(max_depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
 
 	kfree(tag_index);
 	kfree(tag_map);
@@ -926,7 +911,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth))
 		return;
 
 	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
...
@@ -294,7 +294,6 @@ struct blk_queue_tag {
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
-	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };
 
...