Commit 705cda97 authored by Bart Van Assche, committed by Jens Axboe

blk-mq: Make it safe to use RCU to iterate over blk_mq_tag_set.tag_list

Since the next patch in this series will use RCU to iterate over
tag_list, make this safe. Add lockdep_assert_held() statements
in functions that iterate over tag_list to make clear that using
list_for_each_entry() instead of list_for_each_entry_rcu() is
fine in these functions.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent d945a365
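For context, here is a hedged sketch of the reader-side iteration this commit prepares for. It is not part of this patch — the actual RCU reader is introduced by the next patch in the series — and the function name example_tag_list_reader is hypothetical:

/* Hypothetical illustration, not part of this commit. */
static void example_tag_list_reader(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	rcu_read_lock();
	/* Safe against concurrent list_del_rcu()/list_add_tail_rcu(). */
	list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
		/* Inspect q here; sleeping is not allowed in this section. */
	}
	rcu_read_unlock();
}

The diff below makes that iteration safe by switching the tag_list writers to the RCU list primitives.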
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2111,6 +2111,8 @@ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_freeze_queue(q);
 		queue_set_hctx_shared(q, shared);
@@ -2123,7 +2125,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	struct blk_mq_tag_set *set = q->tag_set;
 
 	mutex_lock(&set->tag_list_lock);
-	list_del_init(&q->tag_set_list);
+	list_del_rcu(&q->tag_set_list);
+	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2131,6 +2134,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
+
+	synchronize_rcu();
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -2148,7 +2153,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	}
 	if (set->flags & BLK_MQ_F_TAG_SHARED)
 		queue_set_hctx_shared(q, true);
-	list_add_tail(&q->tag_set_list, &set->tag_list);
+	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
 
 	mutex_unlock(&set->tag_list_lock);
 }
@@ -2639,6 +2644,8 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	if (nr_hw_queues > nr_cpu_ids)
 		nr_hw_queues = nr_cpu_ids;
 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
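The removal path above follows the standard RCU list-deletion pattern: list_del_rcu() unlinks the entry while leaving its forward pointer intact for readers still traversing the list, INIT_LIST_HEAD() re-initializes the node so later emptiness checks on q->tag_set_list stay well defined, and synchronize_rcu() after dropping tag_list_lock waits for all pre-existing readers before the queue can be torn down. The lockdep_assert_held() annotations document that the remaining plain list_for_each_entry() walks still run under tag_list_lock, which is why they need no RCU variant. A minimal generic sketch of that removal pattern (names here are illustrative, not blk-mq API):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct example_node {
	struct list_head entry;
};

static void example_remove(struct mutex *lock, struct example_node *n)
{
	mutex_lock(lock);
	list_del_rcu(&n->entry);	/* readers in flight still see a valid ->next */
	INIT_LIST_HEAD(&n->entry);	/* keep later list_empty()/re-deletion well defined */
	mutex_unlock(lock);
	synchronize_rcu();		/* no reader can still reference n after this returns */
	/* n may now be freed or reused */
}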