Commit e8099177 authored by Hannes Reinecke, committed by Mike Snitzer

dm mpath: push back requests instead of queueing

There is no reason why multipath needs to queue requests internally for
queue_if_no_path or pg_init; we should rather push them back onto the
request queue.
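
In practice "pushing back" means that map_io() now returns DM_MAPIO_REQUEUE
and lets the dm core re-insert the clone into the block layer's request
queue, instead of parking it on the private m->queued_ios list. A rough
sketch of that dispatch contract, modeled on the dm_* calls visible in the
removed dispatch_queued_ios() below (an illustration, not verbatim dm core
code):

	switch (r) {
	case DM_MAPIO_REMAPPED:
		dm_dispatch_request(clone);	/* issue to the chosen path */
		break;
	case DM_MAPIO_REQUEUE:
		clear_mapinfo(m, info);
		dm_requeue_unmapped_request(clone);	/* push back onto the queue */
		break;
	default:
		if (r < 0) {
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);	/* fail the request */
		}
	}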

And while we're at it we can simplify the conditional statement in
map_io() to make it easier to read.
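
The simplification hinges on the new pg_ready() helper added by this patch,
#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required): with a
current path and pg_ready() true we remap; with a path but pg_init still
outstanding we kick off __pg_init_all_paths() and requeue; with no path at
all we either requeue (__must_push_back()) or fail the I/O with -EIO.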

Since mpath no longer does internal queuing of I/O, the table info no
longer emits the internal queue_size.  Instead it displays 1 if queuing
is being used and 0 if it is not.
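
For illustration (values hypothetical): the features section of a
"dmsetup status" line previously read e.g. "2 5 3", meaning five requests
on the internal queue and three pg_init invocations; it now reads "2 1 3",
meaning queuing is currently in effect.
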
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Parent 9974fa2c
drivers/md/dm-mpath.c
@@ -93,9 +93,7 @@ struct multipath {
 	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
-	unsigned queue_size;
 	struct work_struct process_queued_ios;
-	struct list_head queued_ios;
 
 	struct work_struct trigger_event;
 
@@ -124,6 +122,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
+static int __pgpath_busy(struct pgpath *pgpath);
 
 
 /*-----------------------------------------------
@@ -195,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 	m = kzalloc(sizeof(*m), GFP_KERNEL);
 	if (m) {
 		INIT_LIST_HEAD(&m->priority_groups);
-		INIT_LIST_HEAD(&m->queued_ios);
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
 		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
@@ -368,12 +366,15 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
  */
 static int __must_push_back(struct multipath *m)
 {
-	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
-		dm_noflush_suspending(m->ti));
+	return (m->queue_if_no_path ||
+		(m->queue_if_no_path != m->saved_queue_if_no_path &&
+		 dm_noflush_suspending(m->ti)));
 }
 
+#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required)
+
 static int map_io(struct multipath *m, struct request *clone,
-		  union map_info *map_context, unsigned was_queued)
+		  union map_info *map_context)
 {
 	int r = DM_MAPIO_REMAPPED;
 	size_t nr_bytes = blk_rq_bytes(clone);
@@ -391,37 +392,28 @@ static int map_io(struct multipath *m, struct request *clone,
 
 	pgpath = m->current_pgpath;
 
-	if (was_queued)
-		m->queue_size--;
-
-	if (m->pg_init_required) {
-		if (!m->pg_init_in_progress)
-			queue_work(kmultipathd, &m->process_queued_ios);
-		r = DM_MAPIO_REQUEUE;
-	} else if ((pgpath && m->queue_io) ||
-		   (!pgpath && m->queue_if_no_path)) {
-		/* Queue for the daemon to resubmit */
-		list_add_tail(&clone->queuelist, &m->queued_ios);
-		m->queue_size++;
-		if (!m->queue_io)
-			queue_work(kmultipathd, &m->process_queued_ios);
-		pgpath = NULL;
-		r = DM_MAPIO_SUBMITTED;
-	} else if (pgpath) {
-		bdev = pgpath->path.dev->bdev;
-		clone->q = bdev_get_queue(bdev);
-		clone->rq_disk = bdev->bd_disk;
-	} else if (__must_push_back(m))
-		r = DM_MAPIO_REQUEUE;
-	else
-		r = -EIO;	/* Failed */
-
-	mpio->pgpath = pgpath;
-	mpio->nr_bytes = nr_bytes;
-
-	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
-		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
-					      nr_bytes);
+	if (pgpath) {
+		if (pg_ready(m)) {
+			bdev = pgpath->path.dev->bdev;
+			clone->q = bdev_get_queue(bdev);
+			clone->rq_disk = bdev->bd_disk;
+			mpio->pgpath = pgpath;
+			mpio->nr_bytes = nr_bytes;
+			if (pgpath->pg->ps.type->start_io)
+				pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
+							      &pgpath->path,
+							      nr_bytes);
+		} else {
+			__pg_init_all_paths(m);
+			r = DM_MAPIO_REQUEUE;
+		}
+	} else {
+		/* No path */
+		if (__must_push_back(m))
+			r = DM_MAPIO_REQUEUE;
+		else
+			r = -EIO;	/* Failed */
+	}
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -443,7 +435,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path && m->queue_size)
+	if (!m->queue_if_no_path)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -451,40 +443,6 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	return 0;
 }
 
-/*-----------------------------------------------------------------
- * The multipath daemon is responsible for resubmitting queued ios.
- *---------------------------------------------------------------*/
-static void dispatch_queued_ios(struct multipath *m)
-{
-	int r;
-	unsigned long flags;
-	union map_info *info;
-	struct request *clone, *n;
-	LIST_HEAD(cl);
-
-	spin_lock_irqsave(&m->lock, flags);
-	list_splice_init(&m->queued_ios, &cl);
-	spin_unlock_irqrestore(&m->lock, flags);
-
-	list_for_each_entry_safe(clone, n, &cl, queuelist) {
-		list_del_init(&clone->queuelist);
-
-		info = dm_get_rq_mapinfo(clone);
-
-		r = map_io(m, clone, info, 1);
-		if (r < 0) {
-			clear_mapinfo(m, info);
-			dm_kill_unmapped_request(clone, r);
-		} else if (r == DM_MAPIO_REMAPPED)
-			dm_dispatch_request(clone);
-		else if (r == DM_MAPIO_REQUEUE) {
-			clear_mapinfo(m, info);
-			dm_requeue_unmapped_request(clone);
-		}
-	}
-}
-
 static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
@@ -509,7 +467,7 @@ static void process_queued_ios(struct work_struct *work)
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (!must_queue)
-		dispatch_queued_ios(m);
+		dm_table_run_md_queue_async(m->ti->table);
 }
 
 /*
@@ -987,7 +945,7 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 		return DM_MAPIO_REQUEUE;
 
 	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	r = map_io(m, clone, map_context, 0);
+	r = map_io(m, clone, map_context);
 	if (r < 0 || r == DM_MAPIO_REQUEUE)
 		clear_mapinfo(m, map_context);
 
@@ -1056,7 +1014,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	pgpath->is_active = 1;
 
-	if (!m->nr_valid_paths++ && m->queue_size) {
+	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
 		queue_work(kmultipathd, &m->process_queued_ios);
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
@@ -1435,7 +1393,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 
 	/* Features */
 	if (type == STATUSTYPE_INFO)
-		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
+		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
 	else {
 		DMEMIT("%u ", m->queue_if_no_path +
 			      (m->pg_init_retries > 0) * 2 +
@@ -1686,7 +1644,7 @@ static int multipath_busy(struct dm_target *ti)
 	spin_lock_irqsave(&m->lock, flags);
 
 	/* pg_init in progress, requeue until done */
-	if (m->pg_init_in_progress) {
+	if (!pg_ready(m)) {
 		busy = 1;
 		goto out;
 	}
@@ -1739,7 +1697,7 @@ static int multipath_busy(struct dm_target *ti)
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
......