diff --git a/block/blk-mq.c b/block/blk-mq.c
index 01ec97aa9ec8195435c4666749ea8f3753e3155b..4b2507cdece3ab41be5d787622fe698757772c3e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3990,6 +3990,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_rq_cpu);
 
+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
+		cancel_delayed_work_sync(&q->requeue_work);
+
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->run_work);
+	}
+}
+
 static int __init blk_mq_init(void)
 {
 	int i;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f792a0920ebb13e3857e6a5d3397e13775c67e53..6f87c0681443b52f052a1f638621470a574d5aec 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -129,6 +129,7 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_cancel_work_sync(struct request_queue *q);
 void blk_mq_release(struct request_queue *q);
 
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
diff --git a/block/elevator.c b/block/elevator.c
index 4ce6b22813a14690f4bb70795712111fec02e704..27eb70ec277ae90c05177a9d5b774b38b84af28e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -684,12 +684,18 @@ void elevator_init_mq(struct request_queue *q)
 	if (!e)
 		return;
 
+	/*
+	 * We are called before adding disk, when there isn't any FS I/O,
+	 * so freezing queue plus canceling dispatch work is enough to
+	 * drain any dispatch activities originated from passthrough
+	 * requests, then no need to quiesce queue which may add long boot
+	 * latency, especially when lots of disks are involved.
+	 */
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
+	blk_mq_cancel_work_sync(q);
 
 	err = blk_mq_init_sched(q, e);
 
-	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
 
 	if (err) {