Commit 59d13bf5 authored by: Jens Axboe

blk-mq: use sparser tag layout for lower queue depth

Spreading tags over multiple cachelines makes tagging more efficient
on multicore systems. But since we have 8 * sizeof(unsigned long)
tags per cacheline, we don't always get a nice spread.

Attempt to spread the tags over at least 4 cachelines, using fewer
bits per unsigned long if we have to. This improves tagging
performance in setups with 32-128 tags. For higher depths, the
spread is the same as before (BITS_PER_LONG tags per cacheline).
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: 4bb659b1
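
To make the effect concrete, here is a minimal user-space sketch of the sizing rule this patch adds to bt_alloc() below. The halving loop and the at-least-four-words target are taken from the patch; main(), the sample depths, and the printf formatting are illustrative assumptions, not kernel code.

/* Minimal user-space sketch of the new tags_per_word sizing rule. */
#include <stdio.h>

#define BITS_PER_LONG	(8U * sizeof(unsigned long))

int main(void)
{
	unsigned int depths[] = { 16, 32, 64, 128, 256 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		unsigned int depth = depths[i];
		unsigned int tags_per_word = BITS_PER_LONG;

		/* Same loop as the bt_alloc() hunk: halve the tags per
		 * word until the map spans at least 4 words. */
		if (depth >= 4)
			while (tags_per_word * 4 > depth)
				tags_per_word >>= 1;

		printf("depth=%3u -> %2u tags/word, %u word(s)\n",
		       depth, tags_per_word,
		       (depth + tags_per_word - 1) / tags_per_word);
	}
	return 0;
}

On a 64-bit build this prints four words for every depth shown, from 4 tags/word at depth 16 up to 64 tags/word at depth 256, so the 32-128 range named above now spans four cachelines instead of one or two.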
@@ -44,7 +44,7 @@ static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
 {
 	int tag, org_last_tag, end;
 
-	org_last_tag = last_tag = TAG_TO_BIT(last_tag);
+	org_last_tag = last_tag;
 	end = bm->depth;
 	do {
 restart:
@@ -84,12 +84,12 @@ static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
 	int index, i, tag;
 
 	last_tag = org_last_tag = *tag_cache;
-	index = TAG_TO_INDEX(last_tag);
+	index = TAG_TO_INDEX(bt, last_tag);
 
 	for (i = 0; i < bt->map_nr; i++) {
-		tag = __bt_get_word(&bt->map[index], last_tag);
+		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
 		if (tag != -1) {
-			tag += index * BITS_PER_LONG;
+			tag += (index << bt->bits_per_word);
 			goto done;
 		}
@@ -233,10 +233,10 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
 
 static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
-	const int index = TAG_TO_INDEX(tag);
+	const int index = TAG_TO_INDEX(bt, tag);
 	struct bt_wait_state *bs;
 
-	clear_bit(TAG_TO_BIT(tag), &bt->map[index].word);
+	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
 
 	bs = bt_wake_ptr(bt);
 	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
@@ -292,7 +292,7 @@ static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
 			bit++;
 		} while (1);
 
-		off += BITS_PER_LONG;
+		off += (1 << bt->bits_per_word);
 	}
 }
@@ -333,14 +333,31 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
 {
 	int i;
 
+	bt->bits_per_word = ilog2(BITS_PER_LONG);
+
 	/*
 	 * Depth can be zero for reserved tags, that's not a failure
 	 * condition.
 	 */
 	if (depth) {
-		int nr, i, map_depth;
+		unsigned int nr, i, map_depth, tags_per_word;
+
+		tags_per_word = (1 << bt->bits_per_word);
+
+		/*
+		 * If the tag space is small, shrink the number of tags
+		 * per word so we spread over a few cachelines, at least.
+		 * If less than 4 tags, just forget about it, it's not
+		 * going to work optimally anyway.
+		 */
+		if (depth >= 4) {
+			while (tags_per_word * 4 > depth) {
+				bt->bits_per_word--;
+				tags_per_word = (1 << bt->bits_per_word);
+			}
+		}
 
-		nr = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+		nr = ALIGN(depth, tags_per_word) / tags_per_word;
 		bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap),
 				GFP_KERNEL, node);
 		if (!bt->map)
@@ -349,8 +366,8 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
 		bt->map_nr = nr;
 
 		map_depth = depth;
 		for (i = 0; i < nr; i++) {
-			bt->map[i].depth = min(map_depth, BITS_PER_LONG);
-			map_depth -= BITS_PER_LONG;
+			bt->map[i].depth = min(map_depth, tags_per_word);
+			map_depth -= tags_per_word;
 		}
 	}
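
Worked example for the two bt_alloc() hunks above, assuming BITS_PER_LONG = 64: with depth = 64, tags_per_word starts at 64 and is halved twice (64 * 4 = 256 > 64, then 32 * 4 = 128 > 64) until 16 * 4 = 64 ends the loop, leaving bits_per_word = 4. Then nr = ALIGN(64, 16) / 16 = 4, so four maps are allocated, each with depth = min(map_depth, 16) = 16, and the 64 tags are spread over four cachelines instead of one.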
@@ -443,8 +460,10 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
 	if (!tags)
 		return 0;
 
-	page += sprintf(page, "nr_tags=%u, reserved_tags=%u\n",
-			tags->nr_tags, tags->nr_reserved_tags);
+	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
+			"bits_per_word=%u\n",
+			tags->nr_tags, tags->nr_reserved_tags,
+			tags->bitmap_tags.bits_per_word);
 
 	free = bt_unused_tags(&tags->bitmap_tags);
 	res = bt_unused_tags(&tags->breserved_tags);
......
@@ -11,8 +11,8 @@ struct bt_wait_state {
 	wait_queue_head_t wait;
 } ____cacheline_aligned_in_smp;
 
-#define TAG_TO_INDEX(tag)	((tag) / BITS_PER_LONG)
-#define TAG_TO_BIT(tag)		((tag) & (BITS_PER_LONG - 1))
+#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
+#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))
 
 struct blk_mq_bitmap {
 	unsigned long word;
@@ -22,9 +22,10 @@ struct blk_mq_bitmap {
 struct blk_mq_bitmap_tags {
 	unsigned int depth;
 	unsigned int wake_cnt;
+	unsigned int bits_per_word;
 
+	struct blk_mq_bitmap *map;
 	unsigned int map_nr;
-	struct blk_mq_bitmap *map;
 
 	unsigned int wake_index;
 	struct bt_wait_state *bs;
......
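
As a sanity check of the reworked macros, here is a user-space sketch; struct bt is a hypothetical stand-in for blk_mq_bitmap_tags, carrying only the field the macros touch.

#include <stdio.h>

/* Hypothetical stand-in for blk_mq_bitmap_tags. */
struct bt {
	unsigned int bits_per_word;
};

#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))

int main(void)
{
	struct bt bt = { .bits_per_word = 4 };	/* 16 tags per word */

	/* 37 = 2 * 16 + 5, so tag 37 lands in word 2 at bit 5. */
	printf("tag 37 -> index %u, bit %u\n",
	       TAG_TO_INDEX(&bt, 37U), TAG_TO_BIT(&bt, 37U));
	return 0;
}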