提交 c5248f79 编写于 作者: M Mike Snitzer

dm: remove support for stacking dm-mq on .request_fn device(s)

Remove all fiddly code that propped up this support for a blk-mq
request-queue on top of all .request_fn devices.

Testing has proven this niche request-based dm-mq mode to be buggy, when
testing fault tolerance with DM multipath, and there is no point trying
to preserve it.

Should help improve efficiency of pure dm-mq code and make code
maintenance less delicate.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
上级 818c5f3b
...@@ -418,7 +418,10 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, ...@@ -418,7 +418,10 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
spin_unlock_irq(&m->lock); spin_unlock_irq(&m->lock);
if (clone) { if (clone) {
/* Old request-based interface: allocated clone is passed in */ /*
* Old request-based interface: allocated clone is passed in.
* Used by: .request_fn stacked on .request_fn path(s).
*/
clone->q = bdev_get_queue(bdev); clone->q = bdev_get_queue(bdev);
clone->rq_disk = bdev->bd_disk; clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
......
...@@ -1141,11 +1141,6 @@ static void free_rq_clone(struct request *clone) ...@@ -1141,11 +1141,6 @@ static void free_rq_clone(struct request *clone)
else if (!md->queue->mq_ops) else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */ /* request_fn queue stacked on request_fn queue(s) */
free_clone_request(md, clone); free_clone_request(md, clone);
/*
* NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
* no need to call free_clone_request() because we leverage blk-mq by
* allocating the clone at the end of the blk-mq pdu (see: clone_rq)
*/
if (!md->queue->mq_ops) if (!md->queue->mq_ops)
free_rq_tio(tio); free_rq_tio(tio);
...@@ -1866,24 +1861,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md, ...@@ -1866,24 +1861,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
struct dm_rq_target_io *tio, gfp_t gfp_mask) struct dm_rq_target_io *tio, gfp_t gfp_mask)
{ {
/* /*
* Do not allocate a clone if tio->clone was already set * Create clone for use with .request_fn request_queue
* (see: dm_mq_queue_rq).
*/ */
bool alloc_clone = !tio->clone;
struct request *clone; struct request *clone;
if (alloc_clone) { clone = alloc_clone_request(md, gfp_mask);
clone = alloc_clone_request(md, gfp_mask); if (!clone)
if (!clone) return NULL;
return NULL;
} else
clone = tio->clone;
blk_rq_init(NULL, clone); blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) { if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */ /* -ENOMEM */
if (alloc_clone) free_clone_request(md, clone);
free_clone_request(md, clone);
return NULL; return NULL;
} }
...@@ -2692,22 +2681,12 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -2692,22 +2681,12 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
*/ */
tio->ti = ti; tio->ti = ti;
/* /* Direct call is fine since .queue_rq allows allocations */
* Both the table and md type cannot change after initial table load if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
*/ /* Undo dm_start_request() before requeuing */
if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { rq_end_stats(md, rq);
/* clone request is allocated at the end of the pdu */ rq_completed(md, rq_data_dir(rq), false);
tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); return BLK_MQ_RQ_QUEUE_BUSY;
(void) clone_rq(rq, md, tio, GFP_ATOMIC);
queue_kthread_work(&md->kworker, &tio->work);
} else {
/* Direct call is fine since .queue_rq allows allocations */
if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
return BLK_MQ_RQ_QUEUE_BUSY;
}
} }
return BLK_MQ_RQ_QUEUE_OK; return BLK_MQ_RQ_QUEUE_OK;
...@@ -2726,6 +2705,11 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) ...@@ -2726,6 +2705,11 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
struct request_queue *q; struct request_queue *q;
int err; int err;
if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
return -EINVAL;
}
md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL); md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
if (!md->tag_set) if (!md->tag_set)
return -ENOMEM; return -ENOMEM;
...@@ -2738,10 +2722,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) ...@@ -2738,10 +2722,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
md->tag_set->driver_data = md; md->tag_set->driver_data = md;
md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
if (md_type == DM_TYPE_REQUEST_BASED) {
/* put the memory for non-blk-mq clone at the end of the pdu */
md->tag_set->cmd_size += sizeof(struct request);
}
err = blk_mq_alloc_tag_set(md->tag_set); err = blk_mq_alloc_tag_set(md->tag_set);
if (err) if (err)
...@@ -2758,9 +2738,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) ...@@ -2758,9 +2738,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
/* backfill 'mq' sysfs registration normally done in blk_register_queue */ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
blk_mq_register_disk(md->disk); blk_mq_register_disk(md->disk);
if (md_type == DM_TYPE_REQUEST_BASED)
init_rq_based_worker_thread(md);
return 0; return 0;
out_tag_set: out_tag_set:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册