Commit 1bcb1ead authored by Ming Lei, committed by Jens Axboe

blk-mq: allocate flush_rq in blk_mq_init_flush()

It is reasonable to allocate the flush request in blk_mq_init_flush().
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 08e98fc6
block/blk-flush.c
@@ -472,7 +472,16 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+int blk_mq_init_flush(struct request_queue *q)
 {
+	struct blk_mq_tag_set *set = q->tag_set;
+
 	spin_lock_init(&q->mq_flush_lock);
+
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+				set->cmd_size, cache_line_size()),
+				GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+	return 0;
 }
block/blk-mq.c
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
...
block/blk-mq.h
@@ -27,7 +27,7 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_init_flush(struct request_queue *q);
+int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
...
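For readability, the sketch below consolidates the hunks above into the post-commit shape of the code. It is a reconstruction from this diff only; the comments and the framing of the blk_mq_init_queue() excerpt are editorial, and any context the viewer collapsed is assumed rather than quoted.

```c
/* Reconstructed from the hunks above; not a standalone program. */

/* block/blk-flush.c: the flush request is now allocated inside
 * blk_mq_init_flush() itself, sized for the driver's per-request
 * payload (set->cmd_size) and rounded up to a cache line, and the
 * function reports allocation failure instead of returning void. */
int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

/* block/blk-mq.c: excerpt from blk_mq_init_queue(). The helper is
 * now called after the hardware queues are initialized, so a failure
 * unwinds them via blk_mq_exit_hw_queues() instead of the old
 * kfree(q->flush_rq) at the call site. */
	if (blk_mq_init_flush(q))
		goto err_hw_queues;

	blk_mq_map_swqueue(q);

	return q;

err_hw_queues:
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
err_hw:
	blk_cleanup_queue(q);
```

The design point is small but tidy: allocation now lives next to the lock initialization for the flush machinery, and blk_mq_init_queue() gains a regular unwind label (err_hw_queues) in place of a one-off kfree().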