/*
 * Engenio/LSI RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8	index;
	u8	array_name[ARRAY_LABEL_LEN];
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code; /* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4]; /* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;
#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
#define RDAC_LUN_AVT		2
	char			lun_state;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *lun_state[] =
{
	"unowned",
	"owned",
	"owned (AVT mode)",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to control rdac debug logging.
 * Two bits are used for each type of logging; only two types are
 * defined for now, but this can be extended later if required.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)
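
/*
 * Example (illustrative): with the default rdac_logging = 1 only the
 * failover field (bits 1:0) is set, so RDAC_LOG(RDAC_LOG_FAILOVER, ...)
 * messages are emitted and RDAC_LOG(RDAC_LOG_SENSE, ...) messages are not.
 * Writing 0x5 (failover = 1, sense = 1) enables both; since the parameter
 * is S_IWUSR it can normally be changed at runtime through
 * /sys/module/scsi_dh_rdac/parameters/rdac_logging (path assumed from the
 * standard module parameter sysfs layout).
 */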

static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
	BUG_ON(scsi_dh_data == NULL);
	return ((struct rdac_dh_data *) scsi_dh_data->buf);
}

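/*
 * Build a block-layer packet command (BLOCK_PC) request for the internal
 * RDAC commands: the payload is mapped with blk_rq_map_kern(), failfast
 * flags are set so path errors are reported to the handler quickly, and
 * RDAC_RETRIES/RDAC_TIMEOUT are applied.  The caller still fills in
 * rq->cmd[], rq->cmd_len and the sense buffer.
 */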
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}

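/*
 * Build the MODE SELECT request used for failover: mode page 0x2c (the
 * RDAC redundant controller page) is prepared in either the legacy
 * mode-6 or the expanded mode-10 layout, depending on ctlr->use_ms10,
 * with "transfer specified LUNs" mode, the quiescence timeout and forced
 * quiescence set.  The LUN table itself is filled in by the caller.
 */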
static struct request *rdac_failover_get(struct scsi_device *sdev,
					 struct rdac_dh_data *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}

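/*
 * rdac_controller objects are shared by all LUNs behind the same
 * controller: get_controller() looks one up by subsystem and slot id
 * (creating it on first use) and takes a reference; release_controller()
 * runs when the last reference is dropped, flushes any outstanding
 * mode-select work and unlinks the controller from the global list.
 */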
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	flush_workqueue(kmpath_rdacd);
	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
						char *array_name)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	/* update the controller index */
	if (slot_id[1] == 0x31)
		ctlr->index = 0;
	else
		ctlr->index = 1;

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

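/*
 * Issue a vendor-specific INQUIRY (EVPD pages 0xC2/0xC4/0xC8/0xC9) and
 * read the response into h->inq; the result is mapped to the SCSI_DH_*
 * status codes used by the callers below.
 */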
static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}

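/*
 * Page 0xC8 ("edid") reports the volume and array identity.  Only the
 * last byte of the returned LUN field is used, and the array user label
 * is copied one byte out of every two to build an ASCII array_name.
 */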
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
	}
	return err;
}

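/*
 * Page 0xC9 ("vace") tells us whether this path goes through the owning
 * controller: bit 7 of avte_cvp indicates AVT mode, bit 0 indicates the
 * LUN is currently owned.  Unowned paths are marked RDAC_STATE_PASSIVE,
 * so rdac_prep_fn() will fail I/O on them until a failover completes.
 */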
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->lun_state = RDAC_LUN_UNOWNED;
	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		if ((inqp->avte_cvp >> 7) == 0x1) {
			/* LUN in AVT mode */
			sdev_printk(KERN_NOTICE, sdev,
				    "%s: AVT mode detected\n",
				    RDAC_NAME);
			h->lun_state = RDAC_LUN_AVT;
		} else if ((inqp->avte_cvp & 0x1) != 0) {
			/* LUN was owned by the controller */
			h->lun_state = RDAC_LUN_OWNED;
		}
	}

	if (h->lun_state == RDAC_LUN_UNOWNED)
		h->state = RDAC_STATE_PASSIVE;

	return err;
}

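/*
 * Page 0xC4 ("subs") carries the subsystem and slot id identifying the
 * controller; they are used to look up (or create) the shared
 * rdac_controller object for this device.
 */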
static int initialize_controller(struct scsi_device *sdev,
				 struct rdac_dh_data *h, char *array_name)
{
	int err;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
					array_name);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
	}
	return err;
}

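/*
 * Page 0xC2 ("swr4") reports how many LUNs the firmware supports; arrays
 * supporting more than MODE6_MAX_LUN LUNs need the expanded
 * (MODE SELECT 10) page layout.
 */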
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If more than MODE6_MAX_LUN luns are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}

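/*
 * Decode the sense data of a failed MODE SELECT and decide whether the
 * failover attempt should be retried (unit attention, LUN becoming
 * ready, command lock contention, ...) or reported as an I/O error.
 */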
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}

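/*
 * Workqueue handler that performs the actual failover: every activation
 * request queued on the controller is spliced off ms_head, each affected
 * LUN is flagged (0x81) in the mode page LUN table, and a single
 * MODE SELECT is sent, retried up to RDAC_RETRY_COUNT times on retryable
 * sense data.  Each queued request's callback is then completed with the
 * final status.
 */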
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = get_rdac_data(sdev);
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);
	u8 *lun_table;

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

	if (ctlr->use_ms10)
		lun_table = ctlr->mode_select.expanded.lun_table;
	else
		lun_table = ctlr->mode_select.legacy.lun_table;

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h);
	if (!rq)
		goto done;

	list_for_each_entry(qdata, &list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}

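/*
 * Queue an activation request on the controller and kick the worker: the
 * first request queued while no mode select is outstanding schedules
 * ms_work, later ones are only added to ms_head and are completed by the
 * same MODE SELECT.
 */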
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = get_rdac_data(sdev);
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

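/*
 * scsi_dh activate callback: re-check ownership first; if the LUN is
 * still unowned on this path, the failover MODE SELECT is queued and the
 * completion function is called later from send_mode_select(), otherwise
 * it is called immediately.
 */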
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	if (h->lun_state == RDAC_LUN_UNOWNED) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

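/*
 * Fail I/O (quietly) on passive paths so that an upper layer such as
 * dm-multipath can switch to another path instead of the request being
 * sent to a controller that does not own the LUN.
 */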
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

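/*
 * Interpret sense data for failed I/O: transient conditions (becoming
 * ready, quiescence, resets) are requeued, an ownership error marks the
 * path passive so it gets failed over, and everything else is left to
 * the normal SCSI error handling.
 */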
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

static const struct scsi_dh_devlist rdac_dev_list[] = {
	{"IBM", "1722"},
	{"IBM", "1724"},
	{"IBM", "1726"},
	{"IBM", "1742"},
	{"IBM", "1745"},
	{"IBM", "1746"},
	{"IBM", "1814"},
	{"IBM", "1815"},
	{"IBM", "1818"},
	{"IBM", "3526"},
	{"SGI", "TP9400"},
	{"SGI", "TP9500"},
	{"SGI", "IS"},
	{"STK", "OPENstorage D280"},
	{"SUN", "CSM200_R"},
	{"SUN", "LCSM100_I"},
	{"SUN", "LCSM100_S"},
	{"SUN", "LCSM100_E"},
	{"SUN", "LCSM100_F"},
	{"DELL", "MD3000"},
	{"DELL", "MD3000i"},
	{"DELL", "MD32xx"},
	{"DELL", "MD32xxi"},
	{"LSI", "INF-01-00"},
	{"ENGENIO", "INF-01-00"},
	{"STK", "FLEXLINE 380"},
	{"SUN", "CSM100_R_FC"},
	{NULL, NULL},
};

static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.devlist = rdac_dev_list,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

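/*
 * Attach: allocate the per-device handler data, read the vendor pages to
 * discover the LUN number, array name and owning controller, choose the
 * mode select format, and only then publish scsi_dh_data on the sdev
 * under the queue lock.
 */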
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;
	int err;
	char array_name[ARRAY_LABEL_LEN];

	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
			       + sizeof(*h), GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    RDAC_NAME);
		return 0;
	}

	scsi_dh_data->scsi_dh = &rdac_dh;
	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	if (!try_module_get(THIS_MODULE))
		goto clean_ctlr;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s)\n",
		    RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);

	return 0;

clean_ctlr:
	kref_put(&h->ctlr->kref, release_controller);

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    RDAC_NAME);
	return -EINVAL;
}

static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}



static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");