Commit fb45f493 authored by Linus Torvalds

Merge tag 'dm-4.1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper bugfixes from Mike Snitzer:
 "Fix two bugs in the request-based DM blk-mq support that was added
  during the 4.1 merge"

* tag 'dm-4.1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix free_rq_clone() NULL pointer when requeueing unmapped request
  dm: only initialize the request_queue once
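
For context on the second fix, here is a minimal userspace sketch (not the kernel code) of the control flow that table_load() gains in the diff below: the request queue is set up exactly once, on the initial table load, and later loads may not change the device type. All names in this sketch (struct mapped_dev, setup_queue, the simplified table_load) are hypothetical stand-ins for illustration only.

    #include <stdio.h>

    enum dm_type { TYPE_NONE, TYPE_BIO_BASED, TYPE_REQUEST_BASED };

    struct mapped_dev {
            enum dm_type type;
            int queue_initialized;
    };

    /* Stand-in for dm_setup_md_queue(); the real call can block and fail. */
    static int setup_queue(struct mapped_dev *md)
    {
            md->queue_initialized = 1;
            return 0;
    }

    static int table_load(struct mapped_dev *md, enum dm_type table_type)
    {
            if (md->type == TYPE_NONE) {
                    /* Initial table load: adopt the table's type and
                     * initialize the queue exactly once. */
                    md->type = table_type;
                    if (setup_queue(md))
                            return -1;
            } else if (md->type != table_type) {
                    /* Later loads may not change the device type. */
                    fprintf(stderr, "can't change device type after initial table load\n");
                    return -1;
            }
            /* A reload of the same type reuses the already-initialized queue. */
            return 0;
    }

    int main(void)
    {
            struct mapped_dev md = { TYPE_NONE, 0 };
            int r;

            r = table_load(&md, TYPE_REQUEST_BASED);
            printf("first load:  r=%d queue_initialized=%d\n", r, md.queue_initialized);

            r = table_load(&md, TYPE_REQUEST_BASED);
            printf("same reload: r=%d queue_initialized=%d\n", r, md.queue_initialized);

            r = table_load(&md, TYPE_BIO_BASED);
            printf("type change: r=%d\n", r);
            return 0;
    }

The point of the restructuring is visible in the output: the queue-initialization step runs only inside the "initial load" branch, so a second table load can no longer re-run it.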
drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto err_unlock_md_type;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NONE)
+	if (dm_get_md_type(md) == DM_TYPE_NONE) {
 		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(t));
-	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+		/* setup md->queue to reflect md's type (may block) */
+		r = dm_setup_md_queue(md);
+		if (r) {
+			DMWARN("unable to set up device queue for new table.");
+			goto err_unlock_md_type;
+		}
+	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
 		DMWARN("can't change device type after initial table load.");
 		r = -EINVAL;
 		goto err_unlock_md_type;
 	}
-	/* setup md->queue to reflect md's type (may block) */
-	r = dm_setup_md_queue(md);
-	if (r) {
-		DMWARN("unable to set up device queue for new table.");
-		goto err_unlock_md_type;
-	}
 
 	dm_unlock_md_type(md);
 
 	/* stage inactive table */
drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
+	WARN_ON_ONCE(must_be_mapped && !clone->q);
+
 	blk_rq_unprep_clone(clone);
 
-	if (clone->q->mq_ops)
+	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
+	/*
+	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+	 * no need to call free_clone_request() because we leverage blk-mq by
+	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
 		rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(clone, true);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone);
+		free_rq_clone(clone, false);
 }
 
 /*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 {
 	struct request_queue *q = NULL;
 
-	if (md->queue->elevator)
-		return 0;
-
 	/* Fully initialize the queue */
 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
 	if (!q)
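
For readers following the first fix in the shortlog, here is a minimal userspace model (not the kernel code) of the repaired free_rq_clone() logic above: the decision of how to free the clone is keyed off the mapped device's type rather than clone->q, which is NULL when an unmapped request is simply being requeued. All types and helpers in this sketch (struct clone_req, release_clone_rq, free_clone_request) are simplified stand-ins for illustration only.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum dm_type { TYPE_REQUEST_BASED, TYPE_MQ_REQUEST_BASED };

    struct queue { bool mq_ops; };

    struct clone_req {
            struct queue *q;	/* NULL when the clone was never mapped to a path */
    };

    static void release_clone_rq(struct clone_req *clone)
    {
            (void)clone;
            printf("target releases clone\n");
    }

    static void free_clone_request(struct clone_req *clone)
    {
            (void)clone;
            printf("dm frees clone from its mempool\n");
    }

    static void free_rq_clone(struct clone_req *clone, enum dm_type md_type,
                              struct queue *md_queue, bool must_be_mapped)
    {
            /* Mirrors the WARN_ON_ONCE(): only a normally completed clone
             * is expected to have been mapped to an underlying queue. */
            if (must_be_mapped && !clone->q)
                    fprintf(stderr, "warning: completing a clone that was never mapped\n");

            if (md_type == TYPE_MQ_REQUEST_BASED)
                    /* stacked on blk-mq queue(s): the target owns the clone */
                    release_clone_rq(clone);
            else if (!md_queue->mq_ops)
                    /* request_fn queue stacked on request_fn queue(s) */
                    free_clone_request(clone);
            /* blk-mq stacked on request_fn: the clone lives in the blk-mq
             * pdu, so there is nothing to free here */
    }

    int main(void)
    {
            struct queue old_style_queue = { .mq_ops = false };
            struct clone_req unmapped = { .q = NULL };

            /* Requeue path (dm_unprep_request): must_be_mapped is false,
             * so the NULL clone->q is tolerated instead of dereferenced. */
            free_rq_clone(&unmapped, TYPE_REQUEST_BASED, &old_style_queue, false);
            return 0;
    }

The model shows why the old test "if (clone->q->mq_ops)" crashed on a requeued, never-mapped clone, and why dispatching on the device type avoids the NULL dereference while preserving all three stacking cases.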