scsi_dh_rdac.c
/*
 * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code; /* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4]; /* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8	index;
	u8	array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};
static const char *lun_state[] =
{
	"unowned",
	"owned",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging.
 * Two bits are used for each type of logging; only two types are
 * defined for now. Can be extended later if required.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)

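/*
 * Allocate a block layer request for an internal RDAC command: mark it
 * as a packet command, map the caller's buffer into it and set the
 * fail-fast flags so that path errors are reported back quickly.
 */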
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (IS_ERR(rq)) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}
	blk_rq_set_block_pc(rq);

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}

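/*
 * Build the MODE SELECT request that transfers ownership of the queued
 * LUNs to the controller reached through sdev.  The expanded (mode
 * select 10) layout is used when the array supports MODE6_MAX_LUN or
 * more LUNs.
 */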
static struct request *rdac_failover_get(struct scsi_device *sdev,
			struct rdac_dh_data *h, struct list_head *list)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}

static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	list_del(&ctlr->node);
	kfree(ctlr);
}

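/*
 * Find the controller with a matching array id, slot index and host on
 * ctlr_list (the caller holds list_lock), or allocate and initialize a
 * new entry.  GFP_ATOMIC is used because the lock is held.
 */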
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		return NULL;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);

	return ctlr;
}
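
/*
 * Issue an EVPD INQUIRY for the given vendor-specific page code and
 * leave the response in h->inq.
 */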

static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}

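/*
 * Read inquiry page 0xC8 to obtain the LUN number, the array user
 * label and the array unique id for this device.
 */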
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
		memset(array_id, 0, UNIQUE_ID_LEN);
		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
	}
	return err;
}

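/*
 * Read inquiry page 0xC9 to determine the operating mode (RDAC, AVT or
 * IOSHIP), whether this controller currently owns the LUN, and the
 * path priority.  In RDAC mode an unowned LUN makes the path passive.
 */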
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		/* detect the operating mode */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership */
		if (inqp->avte_cvp & 0x1)
			h->lun_state = RDAC_LUN_OWNED;
		else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE)
				h->state = RDAC_STATE_PASSIVE;
		}

		/* Update path prio */
		if (inqp->path_prio & 0x1)
			h->preferred = RDAC_PREFERRED;
		else
			h->preferred = RDAC_NON_PREFERRED;
	}

	return err;
}

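/*
 * Read inquiry page 0xC4 to find the controller slot and attach this
 * device to the matching rdac_controller object.
 */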
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	int err, index;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		/* get the controller index */
		if (inqp->slot_id[1] == 0x31)
			index = 0;
		else
			index = 1;

		spin_lock(&list_lock);
		h->ctlr = get_controller(index, array_name, array_id, sdev);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
		spin_unlock(&list_lock);
	}
	return err;
}

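/*
 * Read inquiry page 0xC2 and decide whether mode select 6 or mode
 * select 10 has to be used for this array.
 */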
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If more than MODE6_MAX_LUN luns are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}

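/*
 * Decode the sense data of a failed MODE_SELECT and decide whether the
 * command should be retried or treated as a hard failure.
 */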
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = sdev->handler_data;

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_IMM_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}

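/*
 * Workqueue handler: drain the controller's list of pending activation
 * requests, send one MODE_SELECT covering all of the queued LUNs and
 * complete the callbacks with the result.  The command is retried up
 * to RDAC_RETRY_COUNT times on retryable sense data.
 */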
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = sdev->handler_data;
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h, &list);
	if (!rq)
		goto done;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
		if (err == SCSI_DH_IMM_RETRY)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}
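
/*
 * Queue an activation request on the controller and schedule the
 * mode-select work if it is not already pending, so that activations
 * for several LUNs on the same controller are coalesced into a single
 * MODE_SELECT.
 */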

static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = sdev->handler_data;
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

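/*
 * scsi_dh activate callback: refresh the ownership state and queue a
 * MODE_SELECT when this controller should take the LUN over (unowned
 * in RDAC mode, or unowned but preferred in IOSHIP mode); otherwise
 * complete the activation immediately.
 */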
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;

}
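
/*
 * scsi_dh check_sense callback: map RDAC-specific sense data to a
 * retry (ADD_TO_MLQUEUE), a path failure (SUCCESS, after marking the
 * path passive where appropriate) or the default midlayer handling.
 */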

static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

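/*
 * Attach the handler to a SCSI device: gather the LUN and array
 * information, bind the device to its controller, determine ownership
 * and select the mode select format to use.
 */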
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	h = kzalloc(sizeof(*h) , GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	sdev->handler_data = h;
	return 0;

clean_ctlr:
	spin_lock(&list_lock);
	kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);

failed:
	kfree(h);
	return -EINVAL;
}

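/*
 * Detach the handler: wait for any queued mode select to finish, drop
 * the controller reference and free the per-device data.
 */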
static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->ctlr && h->ctlr->ms_queued)
		flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);
	sdev->handler_data = NULL;
	kfree(h);
}

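/* scsi_dh entry points for the RDAC device handler */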
static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

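/*
 * Register the device handler and create the single threaded workqueue
 * that issues the mode selects.
 */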
static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.\n");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");

		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");