diff --git a/block/blk-core.c b/block/blk-core.c
index 223f32826e62d6e236ee5ce259091bd26d36348b..b8d1aa2d1008ffa8287fdddff879b262a1eb6746 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2397,7 +2397,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
		 * bypass a potential scheduler on the bottom device for
		 * insert.
		 */
-		blk_mq_request_bypass_insert(rq);
+		blk_mq_request_bypass_insert(rq, true);
		return BLK_STS_OK;
	}

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6eacc1dea8b74c9355117c251c5663bea0e946b7..021562bd5d2c340a6e0d214213220c45328afe96 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1492,7 +1492,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq)
+void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
@@ -1501,7 +1501,8 @@ void blk_mq_request_bypass_insert(struct request *rq)
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

-	blk_mq_run_hw_queue(hctx, false);
+	if (run_queue)
+		blk_mq_run_hw_queue(hctx, false);
 }

 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f97aceff76e91af732315a0ee4cc2be1ad48bb90..9fffec0ad8e9d29fe9ed3223d6d746abc81acfe5 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -56,7 +56,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq);
+void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

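
Note: the sketch below is not part of the patch. It is a minimal, self-contained
userspace model of what the new 'run_queue' parameter buys a caller: pass true to
have the hardware queue kicked immediately on insert (the behaviour the existing
blk_insert_cloned_request caller keeps), or pass false when the caller intends to
batch several insertions and run the queue itself once afterwards. The names
hw_queue, bypass_insert and run_hw_queue are simplified stand-ins invented for
illustration; they are not the real blk-mq structures or API.

/* Userspace sketch only; stand-in names, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct hw_queue {
	int pending;		/* stand-in for the hctx->dispatch list length */
};

static void run_hw_queue(struct hw_queue *hctx)
{
	/* Stand-in for blk_mq_run_hw_queue(): dispatch everything queued. */
	printf("dispatching %d request(s)\n", hctx->pending);
	hctx->pending = 0;
}

/* Mirrors the shape of blk_mq_request_bypass_insert(rq, run_queue). */
static void bypass_insert(struct hw_queue *hctx, bool run_queue)
{
	hctx->pending++;		/* "list_add_tail" onto the dispatch list */
	if (run_queue)
		run_hw_queue(hctx);	/* old behaviour: run the queue on every insert */
}

int main(void)
{
	struct hw_queue hctx = { 0 };

	/* Single insert, dispatched at once (what blk_insert_cloned_request asks for). */
	bypass_insert(&hctx, true);

	/* Hypothetical batching caller: insert several, then run the queue once. */
	bypass_insert(&hctx, false);
	bypass_insert(&hctx, false);
	run_hw_queue(&hctx);

	return 0;
}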