dm-mpath.c 40.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

8 9
#include <linux/device-mapper.h>

10
#include "dm.h"
L
Linus Torvalds 已提交
11
#include "dm-path-selector.h"
M
Mike Anderson 已提交
12
#include "dm-uevent.h"
L
Linus Torvalds 已提交
13

14
#include <linux/blkdev.h>
L
Linus Torvalds 已提交
15 16 17 18 19 20 21 22
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
23
#include <linux/delay.h>
24
#include <scsi/scsi_dh.h>
A
Arun Sharma 已提交
25
#include <linux/atomic.h>
L
Linus Torvalds 已提交
26

27
#define DM_MSG_PREFIX "multipath"
28 29
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
L
Linus Torvalds 已提交
/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */

	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path; /* Delayed pg_init work for this path */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;		/* Protects the mutable state below */

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;	/* Serialises message/suspend work */
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;	/* Path this I/O was mapped to */
	size_t nr_bytes;	/* Size of the I/O, for path-selector accounting */
};

/* Per-path action callback used by action_dev() (fail_path/reinstate_path). */
typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

/* kmultipathd: general work; kmpath_handlerd: pg_init/hw-handler work only */
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static int __pgpath_busy(struct pgpath *pgpath);
L
Linus Torvalds 已提交
124 125 126 127 128 129 130 131


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
M
Micha³ Miros³aw 已提交
132
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
L
Linus Torvalds 已提交
133

134
	if (pgpath) {
135
		pgpath->is_active = 1;
136
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
137
	}
L
Linus Torvalds 已提交
138 139 140 141

	return pgpath;
}

A
Alasdair G Kergon 已提交
142
/* Release a path structure allocated by alloc_pgpath(). */
static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

M
Micha³ Miros³aw 已提交
151
	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
L
Linus Torvalds 已提交
152

M
Micha³ Miros³aw 已提交
153 154
	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);
L
Linus Torvalds 已提交
155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183

	return pg;
}

/*
 * Unlink, release the underlying dm device of, and free every path
 * on the given list.
 */
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *next;

	list_for_each_entry_safe(pgpath, next, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

/*
 * Tear down one priority group: destroy its path selector (if one was
 * attached), release all member paths and free the group itself.
 */
static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	/* ps->type is NULL if parse_path_selector() never completed */
	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

M
Micha³ Miros³aw 已提交
184
/*
 * Allocate and initialise the multipath context for a target.
 * On success the context is linked to @ti (ti->private) and returned;
 * returns NULL on allocation failure.
 */
static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;
	unsigned min_ios = dm_get_reserved_rq_based_ios();

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		m->queue_io = 1;	/* queue everything until a PG is chosen */
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

/*
 * Release everything owned by a multipath context: all priority
 * groups, the hardware-handler strings, the mpio mempool and the
 * context itself.
 */
static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *next;

	list_for_each_entry_safe(pg, next, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245
/*
 * Attach a zeroed per-I/O context (struct dm_mpath_io) to @info.
 * Returns 0 on success, -ENOMEM if the mempool is exhausted.
 */
static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);

	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}

/* Detach and return the per-I/O context to the mempool. */
static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}
L
Linus Torvalds 已提交
246 247 248 249 250

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

251
/*
 * Kick off pg_init on every active path of the current PG.
 * Called with m->lock held (see __multipath_map).
 * Returns the number of pg_init work items now in flight (0 if none
 * was queued or pg_init is already running/disabled).
 */
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (m->pg_init_in_progress || m->pg_init_disabled)
		return 0;

	m->pg_init_count++;
	m->pg_init_required = 0;

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	/* On retry, back off so we don't hammer the controller */
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
	return m->pg_init_in_progress;
}

L
Linus Torvalds 已提交
280 281 282 283 284
/*
 * Make @pgpath's PG the current one.  Called under m->lock via
 * __choose_path_in_pg().
 */
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

296 297
/*
 * Ask @pg's path selector for a path and make it current.
 * Returns 0 on success, -ENXIO if the selector has no usable path.
 * Called under m->lock via __choose_pgpath().
 */
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	/* Switching PGs may require pg_init; __switch_pg sets that up */
	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

313
/*
 * Select the path to use for the next I/O: honour a pending PG switch,
 * stick with the current PG while it has paths, otherwise scan all PGs
 * (bypassed ones only on the second pass).  Clears current_pgpath/
 * current_pg if nothing is usable.  Called under m->lock.
 */
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths) {
		m->queue_io = 0;
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

358 359 360 361 362 363 364 365 366 367 368 369 370
/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path ||
		(m->queue_if_no_path != m->saved_queue_if_no_path &&
		 dm_noflush_suspending(m->ti)));
}

H
Hannes Reinecke 已提交
376 377 378
/*
 * Map cloned requests
 *
 * Common mapping path for both request-based interfaces: the old one
 * (pre-allocated @clone passed in, @rq/@__clone NULL) and blk-mq
 * (@clone NULL; a clone is allocated here and returned via @__clone).
 *
 * Returns DM_MAPIO_REMAPPED on success, DM_MAPIO_REQUEUE to retry
 * later, or -EIO if no path is available and we must not push back.
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = (struct multipath *) ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	spin_lock_irq(&m->lock);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (!pgpath) {
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
		goto out_unlock;
	} else if (m->queue_io || m->pg_init_required) {
		/* PG not ready yet: start pg_init and requeue the request */
		__pg_init_all_paths(m);
		goto out_unlock;
	}

	if (set_mapinfo(m, map_context) < 0)
		/* ENOMEM, requeue */
		goto out_unlock;

	mpio = map_context->ptr;
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	spin_unlock_irq(&m->lock);

	if (clone) {
		/* Old request-based interface: allocated clone is passed in */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/* blk-mq request-based interface */
		*__clone = blk_get_request(bdev_get_queue(bdev),
					   rq_data_dir(rq), GFP_ATOMIC);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_mapinfo(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	/* Let the path selector account for the I/O it is about to see */
	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;

out_unlock:
	spin_unlock_irq(&m->lock);

	return r;
}

451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468
/* Old request-based interface: map a pre-allocated clone. */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

/* blk-mq interface: allocate a clone of @rq and map it; clone returned via @clone. */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

/* Release a clone allocated by multipath_clone_and_map(). */
static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}

L
Linus Torvalds 已提交
469 470 471
/*
 * If we run out of usable paths, should we queue I/O or error it?
 *
 * @save_old_value: preserve the current setting in
 * saved_queue_if_no_path (used around suspend/resume); otherwise the
 * saved value is overwritten with the new one.
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);

	/* When queueing is switched off, restart any held-back I/O */
	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
D
David Howells 已提交
496
static void trigger_event(struct work_struct *work)
L
Linus Torvalds 已提交
497
{
D
David Howells 已提交
498 499
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);
L
Linus Torvalds 已提交
500 501 502 503 504 505 506 507 508 509 510 511 512 513

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
514
/*
 * Parse "<selector> <#selector args> [<arg>]*" from the table line and
 * attach the selector to @pg.  Returns 0 on success or a negative errno
 * with ti->error set.
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	/* Selector now owns the reference; record it and consume its args */
	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

550
/*
 * Parse one path ("<path> [<arg>]*") from the table line: open the
 * device, sort out the SCSI hardware handler (honouring
 * retain_attached_hw_handler) and register the path with the selector.
 * Returns the new pgpath or an ERR_PTR with ti->error set.
 */
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
			       struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/* A different handler is already attached: keep it */
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
				bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
							"handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

638
/*
 * Parse one priority group from the table line:
 *   <selector> <#selector args> [<arg>]* <#paths> <#per-path args>
 *   [<path> [<arg>]*]+
 * Returns the new group or an ERR_PTR with ti->error set.
 */
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	/* each path consumes its device name plus its selector args */
	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

711
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
L
Linus Torvalds 已提交
712 713
{
	unsigned hw_argc;
714
	int ret;
M
Micha³ Miros³aw 已提交
715
	struct dm_target *ti = m->ti;
L
Linus Torvalds 已提交
716

717
	static struct dm_arg _args[] = {
718
		{0, 1024, "invalid number of hardware handler args"},
L
Linus Torvalds 已提交
719 720
	};

721
	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
L
Linus Torvalds 已提交
722 723 724 725 726
		return -EINVAL;

	if (!hw_argc)
		return 0;

727
	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
728

729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744
	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
745
	dm_consume_args(as, hw_argc - 1);
L
Linus Torvalds 已提交
746 747

	return 0;
748 749 750 751
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
L
Linus Torvalds 已提交
752 753
}

754
/*
 * Parse "<#feature args> [<arg>]*" from the table line.  Recognised
 * features: queue_if_no_path, retain_attached_hw_handler,
 * pg_init_retries <n>, pg_init_delay_msecs <n>.
 * Returns 0 on success or a negative errno with ti->error set.
 */
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

/*
 * Multipath target constructor.  Parses the full table line (features,
 * hardware handler, then each priority group) and builds the multipath
 * context.  Returns 0 on success or a negative errno with ti->error set.
 */
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	/* next_pg_num must be 0 iff there are no PGs at all */
	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;

	return 0;

 bad:
	free_multipath(m);
	return r;
}

891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
/*
 * Sleep until all in-flight pg_init work has completed
 * (pg_init_in_progress drops to 0).  Woken via m->pg_init_wait.
 */
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Re-check the counter under the lock before sleeping */
		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

/*
 * Quiesce all asynchronous work for this multipath: temporarily forbid
 * new pg_init, drain the handler and multipath workqueues and the
 * trigger_event work, then re-enable pg_init.
 */
static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}

/* Multipath target destructor: drain outstanding work, then free. */
static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 *
 * Marks the path inactive, informs the path selector, emits a uevent
 * and schedules a table event.  Always returns 0.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	/* Nothing to do if the path is already failed */
	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 *
 * Asks the path selector to take the path back; on success the path is
 * marked active again, and either the md queue is restarted (first
 * valid path) or pg_init is queued if the hardware handler needs it.
 * Returns 0 on success or a negative errno.
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	/* Nothing to do if the path is already active */
	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++) {
		/* First usable path again: force reselection, restart queue */
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	/* Run the queue outside the lock */
	if (run_queue)
		dm_table_run_md_queue_async(m->ti->table);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct priority_group *pg;
	struct pgpath *pgpath;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	/* Force path reselection on the next mapped I/O */
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 *
 * @pgstr: decimal PG number (1-based) from the user message.
 * Returns 0 on success, -EINVAL on a malformed or out-of-range number.
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	/* "%u%c" must match exactly one item: reject trailing characters */
	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		/* Clear bypass on every PG; select the pgnum'th as next */
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
1103
	char dummy;
L
Linus Torvalds 已提交
1104

1105
	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
L
Linus Torvalds 已提交
1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

D
Dave Wysochanski 已提交
1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	/*
	 * Request another pg_init round unless the retry budget is spent
	 * or pg_init has been administratively disabled.
	 */
	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

1140
static void pg_init_done(void *data, int errors)
1141
{
1142
	struct pgpath *pgpath = data;
1143 1144 1145
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
1146
	unsigned delay_retry = 0;
1147 1148 1149 1150 1151 1152 1153 1154 1155 1156

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
1157 1158
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
1172 1173
		/* Wait before retrying. */
		delay_retry = 1;
1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
1191 1192 1193 1194 1195
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
1196
	} else if (!m->pg_init_required)
1197 1198
		pg->bypassed = 0;

1199 1200 1201 1202
	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

1203 1204 1205 1206 1207 1208
	if (m->pg_init_required) {
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
			goto out;
	}
	m->queue_io = 0;
1209

1210 1211 1212 1213 1214
	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

1215
out:
1216 1217 1218
	spin_unlock_irqrestore(&m->lock, flags);
}

1219 1220
static void activate_path(struct work_struct *work)
{
1221
	struct pgpath *pgpath =
1222
		container_of(work, struct pgpath, activate_path.work);
1223

1224 1225 1226 1227 1228
	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1229 1230
}

1231 1232 1233 1234 1235 1236 1237
/*
 * Return 1 for errors that indicate a target-side or data problem rather
 * than a path failure — retrying such I/O on another path is pointless.
 */
static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

L
Linus Torvalds 已提交
1246 1247 1248
/*
 * end_io handling
 */
1249
static int do_end_io(struct multipath *m, struct request *clone,
A
Alasdair G Kergon 已提交
1250
		     int error, struct dm_mpath_io *mpio)
L
Linus Torvalds 已提交
1251
{
1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
1264
	unsigned long flags;
L
Linus Torvalds 已提交
1265

1266
	if (!error && !clone->errors)
L
Linus Torvalds 已提交
1267 1268
		return 0;	/* I/O complete */

1269
	if (noretry_error(error))
M
Mike Snitzer 已提交
1270 1271
		return error;

1272 1273
	if (mpio->pgpath)
		fail_path(mpio->pgpath);
L
Linus Torvalds 已提交
1274

1275
	spin_lock_irqsave(&m->lock, flags);
1276 1277 1278 1279 1280 1281 1282 1283 1284
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
1285
	spin_unlock_irqrestore(&m->lock, flags);
L
Linus Torvalds 已提交
1286

1287
	return r;
L
Linus Torvalds 已提交
1288 1289
}

1290
static int multipath_end_io(struct dm_target *ti, struct request *clone,
L
Linus Torvalds 已提交
1291 1292
			    int error, union map_info *map_context)
{
A
Alasdair G Kergon 已提交
1293 1294
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
1295
	struct pgpath *pgpath;
L
Linus Torvalds 已提交
1296 1297 1298
	struct path_selector *ps;
	int r;

1299 1300
	BUG_ON(!mpio);

1301
	r  = do_end_io(m, clone, error, mpio);
1302
	pgpath = mpio->pgpath;
L
Linus Torvalds 已提交
1303 1304 1305
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
1306
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
L
Linus Torvalds 已提交
1307
	}
1308
	clear_mapinfo(m, map_context);
L
Linus Torvalds 已提交
1309 1310 1311 1312 1313 1314

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
1315 1316 1317
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
L
Linus Torvalds 已提交
1318 1319 1320 1321 1322
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	/*
	 * Stop queueing on no-path so outstanding I/O can error out and
	 * suspend can complete; the old setting is saved for resume.
	 */
	queue_if_no_path(m, 0, 1);
}

1326 1327
static void multipath_postsuspend(struct dm_target *ti)
{
1328 1329 1330
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
1331
	flush_multipath_work(m);
1332
	mutex_unlock(&m->work_mutex);
1333 1334
}

1335 1336 1337
/*
 * Restore the queue_if_no_path setting.
 */
L
Linus Torvalds 已提交
1338 1339 1340 1341 1342 1343
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	/* Restore the queue_if_no_path setting saved by presuspend. */
	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
1364 1365
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	/*
	 * Emit the INFO or TABLE status line described in the comment
	 * above: features, hardware handler, then per-PG and per-path data.
	 * Everything is produced under m->lock for a consistent snapshot.
	 */
	int sz = 0;
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
	else {
		/* Feature-arg count: each numeric feature contributes 2. */
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      m->retain_attached_hw_handler);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->retain_attached_hw_handler)
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	/* init_group_number: next explicitly selected PG wins. */
	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
1475
	int r = -EINVAL;
L
Linus Torvalds 已提交
1476 1477 1478 1479
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

1480 1481
	mutex_lock(&m->work_mutex);

1482 1483 1484 1485 1486
	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

L
Linus Torvalds 已提交
1487
	if (argc == 1) {
1488
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1489 1490
			r = queue_if_no_path(m, 1, 0);
			goto out;
1491
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1492 1493 1494
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
L
Linus Torvalds 已提交
1495 1496
	}

1497
	if (argc != 2) {
1498
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1499 1500
		goto out;
	}
L
Linus Torvalds 已提交
1501

1502
	if (!strcasecmp(argv[0], "disable_group")) {
1503 1504
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
1505
	} else if (!strcasecmp(argv[0], "enable_group")) {
1506 1507
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
1508
	} else if (!strcasecmp(argv[0], "switch_group")) {
1509 1510
		r = switch_pg_num(m, argv[1]);
		goto out;
1511
	} else if (!strcasecmp(argv[0], "reinstate_path"))
L
Linus Torvalds 已提交
1512
		action = reinstate_path;
1513
	else if (!strcasecmp(argv[0], "fail_path"))
L
Linus Torvalds 已提交
1514
		action = fail_path;
1515
	else {
1516
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1517 1518
		goto out;
	}
L
Linus Torvalds 已提交
1519

1520
	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
L
Linus Torvalds 已提交
1521
	if (r) {
1522
		DMWARN("message: error getting device %s",
L
Linus Torvalds 已提交
1523
		       argv[1]);
1524
		goto out;
L
Linus Torvalds 已提交
1525 1526 1527 1528 1529 1530
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

1531 1532
out:
	mutex_unlock(&m->work_mutex);
L
Linus Torvalds 已提交
1533 1534 1535
	return r;
}

1536
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
M
Milan Broz 已提交
1537 1538
			   unsigned long arg)
{
1539
	struct multipath *m = ti->private;
1540
	struct pgpath *pgpath;
1541 1542
	struct block_device *bdev;
	fmode_t mode;
M
Milan Broz 已提交
1543
	unsigned long flags;
1544 1545 1546 1547 1548
	int r;

	bdev = NULL;
	mode = 0;
	r = 0;
M
Milan Broz 已提交
1549 1550 1551 1552

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
1553
		__choose_pgpath(m, 0);
M
Milan Broz 已提交
1554

1555 1556 1557 1558 1559
	pgpath = m->current_pgpath;

	if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		mode = pgpath->path.dev->mode;
1560
	}
M
Milan Broz 已提交
1561

1562
	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1563
		r = -ENOTCONN;
M
Milan Broz 已提交
1564 1565 1566 1567 1568
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

1569 1570 1571
	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
1572 1573
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);
1574

1575 1576 1577 1578 1579 1580 1581 1582
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		}
		if (m->pg_init_required)
			__pg_init_all_paths(m);
1583
		spin_unlock_irqrestore(&m->lock, flags);
1584
		dm_table_run_md_queue_async(m->ti->table);
1585
	}
1586

1587
	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
M
Milan Broz 已提交
1588 1589
}

1590 1591 1592 1593 1594 1595 1596 1597 1598 1599
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	/*
	 * Invoke @fn for every underlying path device; stop at the first
	 * non-zero return and propagate it.
	 */
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

1610 1611 1612 1613
/* Ask the path's low-level driver whether its queue is currently busy. */
static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* pg_init in progress or no paths available */
	if (m->pg_init_in_progress ||
	    (!m->nr_valid_paths && m->queue_if_no_path)) {
		busy = 1;
		goto out;
	}
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

L
Linus Torvalds 已提交
1685 1686 1687 1688 1689
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
1690
	.version = {1, 9, 0},
L
Linus Torvalds 已提交
1691 1692 1693
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
1694
	.map_rq = multipath_map,
1695 1696
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
1697
	.rq_end_io = multipath_end_io,
L
Linus Torvalds 已提交
1698
	.presuspend = multipath_presuspend,
1699
	.postsuspend = multipath_postsuspend,
L
Linus Torvalds 已提交
1700 1701 1702
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
M
Milan Broz 已提交
1703
	.ioctl  = multipath_ioctl,
1704
	.iterate_devices = multipath_iterate_devices,
1705
	.busy = multipath_busy,
L
Linus Torvalds 已提交
1706 1707 1708 1709 1710 1711 1712
};

static int __init dm_multipath_init(void)
{
	/*
	 * Module init: create the mpath-io slab, register the target and
	 * the two workqueues.  Unwinds in reverse order via goto on error.
	 */
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	/* Tear down in reverse order of dm_multipath_init(). */
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

/* Module entry/exit hooks and metadata. */
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");