Commit 8cf7961d authored by Christoph Hellwig, committed by Jens Axboe

block: bypass ->make_request_fn for blk-mq drivers

Call blk_mq_make_request when no ->make_request_fn is set.  This is
safe now that blk_alloc_queue always sets up the pointer for make_request
based drivers.  This avoids an indirect call in the blk-mq driver I/O
fast path, which is rather expensive due to spectre mitigations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent ae3cc8d8
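For orientation, the dispatch this patch introduces in generic_make_request() condenses to the snippet below. This is a restatement of the first hunk, not extra code from the patch: blk-mq queues no longer set ->make_request_fn, so they take the direct branch and skip the retpoline-protected indirect call.

	if (q->make_request_fn)
		ret = q->make_request_fn(q, bio);	/* bio-based drivers keep the indirect call */
	else
		ret = blk_mq_make_request(q, bio);	/* blk-mq queues: direct call on the fast path */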
@@ -1073,7 +1073,10 @@ blk_qc_t generic_make_request(struct bio *bio)
 			/* Create a fresh bio_list for all subordinate requests */
 			bio_list_on_stack[1] = bio_list_on_stack[0];
 			bio_list_init(&bio_list_on_stack[0]);
-			ret = q->make_request_fn(q, bio);
+			if (q->make_request_fn)
+				ret = q->make_request_fn(q, bio);
+			else
+				ret = blk_mq_make_request(q, bio);
 
 			blk_queue_exit(q);
@@ -1113,9 +1116,7 @@ EXPORT_SYMBOL(generic_make_request);
  *
  * This function behaves like generic_make_request(), but does not protect
  * against recursion. Must only be used if the called driver is known
- * to not call generic_make_request (or direct_make_request) again from
- * its make_request function. (Calling direct_make_request again from
- * a workqueue is perfectly fine as that doesn't recurse).
+ * to be blk-mq based.
  */
 blk_qc_t direct_make_request(struct bio *bio)
 {
@@ -1123,20 +1124,27 @@ blk_qc_t direct_make_request(struct bio *bio)
 	bool nowait = bio->bi_opf & REQ_NOWAIT;
 	blk_qc_t ret;
 
+	if (WARN_ON_ONCE(q->make_request_fn))
+		goto io_error;
 	if (!generic_make_request_checks(bio))
 		return BLK_QC_T_NONE;
 
 	if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
 		if (nowait && !blk_queue_dying(q))
-			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
-		return BLK_QC_T_NONE;
+			goto would_block;
+		goto io_error;
 	}
 
-	ret = q->make_request_fn(q, bio);
+	ret = blk_mq_make_request(q, bio);
 	blk_queue_exit(q);
 	return ret;
+
+would_block:
+	bio_wouldblock_error(bio);
+	return BLK_QC_T_NONE;
+io_error:
+	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 EXPORT_SYMBOL_GPL(direct_make_request);
@@ -1984,7 +1984,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  *
  * Returns: Request queue cookie.
  */
-static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
@@ -2096,6 +2096,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	return cookie;
 }
+EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */
 
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx)
@@ -2955,7 +2956,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
-	q->make_request_fn = blk_mq_make_request;
 	q->nr_requests = set->queue_depth;
 
 	/*
@@ -1788,6 +1788,9 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	int srcu_idx;
 	struct dm_table *map;
 
+	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED)
+		return blk_mq_make_request(q, bio);
+
 	map = dm_get_live_table(md, &srcu_idx);
 
 	/* if we're suspended, we have to queue this io for later */
@@ -578,4 +578,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq)
 		rq->q->mq_ops->cleanup_rq(rq);
 }
 
+blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio);
+
 #endif