diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index da1de190a3b13ce2b62b3c1c1ec76df9fcf3e39d..2a689fddb127c982acd19282edf2877d706e4a5f 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -404,6 +404,13 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct elevator_queue *e = hctx->queue->elevator;
 
+	/*
+	 * blk_mq_sched_insert_requests() is called from flush plug
+	 * context only, and holds one usage counter to prevent the
+	 * queue from being released.
+	 */
+	percpu_ref_get(&q->q_usage_counter);
+
 	if (e && e->type->ops.mq.insert_requests)
 		e->type->ops.mq.insert_requests(hctx, list, false);
 	else {
@@ -415,12 +422,14 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 		if (!hctx->dispatch_busy && !e && !run_queue_async) {
 			blk_mq_try_issue_list_directly(hctx, list);
 			if (list_empty(list))
-				return;
+				goto out;
 		}
 		blk_mq_insert_requests(hctx, ctx, list);
 	}
 
 	blk_mq_run_hw_queue(hctx, run_queue_async);
+ out:
+	percpu_ref_put(&q->q_usage_counter);
 }
 
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
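
The patch pins the queue with percpu_ref_get(&q->q_usage_counter) on entry and drops the reference on every exit path (hence return becomes goto out), so the request_queue cannot be torn down while requests from the flushed plug list are being inserted and dispatched. As a rough illustration of that pattern, here is a minimal userspace C sketch; the names queue_get/queue_put/queue_work and the plain atomic counter are hypothetical stand-ins, not kernel APIs, and percpu_ref has different internals:

	/*
	 * Userspace analogue of the q_usage_counter guard above: the
	 * worker takes its own reference before a window in which the
	 * original owner may drop the last one, so the object cannot
	 * be freed out from under it.
	 */
	#include <stdatomic.h>
	#include <stdlib.h>

	struct queue {
		atomic_int usage;	/* stands in for q->q_usage_counter */
	};

	static void queue_get(struct queue *q)
	{
		atomic_fetch_add(&q->usage, 1);
	}

	static void queue_put(struct queue *q)
	{
		/* Free on the last reference, as queue release would. */
		if (atomic_fetch_sub(&q->usage, 1) == 1)
			free(q);
	}

	static void queue_work(struct queue *q)
	{
		queue_get(q);		/* mirrors percpu_ref_get() */
		/* ... insert and run requests; q stays valid here ... */
		queue_put(q);		/* mirrors percpu_ref_put() */
	}

	int main(void)
	{
		struct queue *q = malloc(sizeof(*q));

		atomic_init(&q->usage, 1);	/* owner's initial reference */
		queue_work(q);
		queue_put(q);			/* owner drops the last ref */
		return 0;
	}

The single exit label in the patch keeps the get/put strictly paired even on the early-return path, which is the usual kernel idiom for balancing a reference taken at function entry.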