/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 *  bfad_im.c Linux driver IM module.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_cb_ioim.h"
#include "bfa_fcs.h"

BFA_TRC_FILE(LDRV, IM);

DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
static int bfad_im_slave_alloc(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s  *im_port,
				struct bfad_itnim_s *itnim);

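/*
 * BFA IO completion callback. Copies any sense data, records the
 * residue and maps the BFA IO status to a SCSI host status before
 * handing the command back to the midlayer. Good completions may ramp
 * up the LUN queue depth; TASK SET FULL triggers queue-full handling.
 */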
void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
			enum bfi_ioim_status io_status, u8 scsi_status,
			int sns_len, u8 *sns_info, s32 residue)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s         *bfad = drv;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;
	u8         host_status = DID_OK;

	switch (io_status) {
	case BFI_IOIM_STS_OK:
		bfa_trc(bfad, scsi_status);
		scsi_set_resid(cmnd, 0);

		if (sns_len > 0) {
			bfa_trc(bfad, sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(cmnd->sense_buffer, sns_info, sns_len);
		}

		if (residue > 0) {
			bfa_trc(bfad, residue);
			scsi_set_resid(cmnd, residue);
			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
				(scsi_bufflen(cmnd) - residue) <
					cmnd->underflow) {
				bfa_trc(bfad, 0);
				host_status = DID_ERROR;
			}
		}
		cmnd->result = ScsiResult(host_status, scsi_status);

		break;

	case BFI_IOIM_STS_ABORTED:
	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_PATHTOV:
	default:
		host_status = DID_ERROR;
		cmnd->result = ScsiResult(host_status, 0);
	}

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;
	bfa_trc(bfad, cmnd->result);

	itnim_data = cmnd->device->hostdata;
	if (itnim_data) {
		itnim = itnim_data->itnim;
		if (!cmnd->result && itnim &&
			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
			/* Queue depth adjustment for good status completion */
			bfad_os_ramp_up_qdepth(itnim, cmnd->device);
		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
			/* qfull handling */
			bfad_os_handle_qfull(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}

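/*
 * BFA IO good-completion callback (fast path): no sense data or residue
 * to process, just complete the command and, if there is headroom, ramp
 * up the LUN queue depth.
 */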
void
bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;

	cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;

	/* Queue depth adjustment */
	if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
		itnim_data = cmnd->device->hostdata;
		if (itnim_data) {
			itnim = itnim_data->itnim;
			if (itnim)
				bfad_os_ramp_up_qdepth(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}

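/*
 * BFA IO abort completion callback. The command is marked with
 * DID_ERROR and host_scribble is cleared; the eh_abort handler detects
 * the completion and calls scsi_done itself.
 */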
void
bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s         *bfad = drv;

	cmnd->result = ScsiResult(DID_ERROR, 0);

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	bfa_trc(bfad, cmnd->result);
	cmnd->host_scribble = NULL;
}

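/*
 * Task management completion callback. Stores the TM status in
 * SCp.Status, sets IO_DONE_BIT and wakes any waiter parked on SCp.ptr.
 */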
void
bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
		   enum bfi_tskim_status tsk_status)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
	wait_queue_head_t *wq;

	cmnd->SCp.Status |= tsk_status << 1;
	set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
	wq = (wait_queue_head_t *) cmnd->SCp.ptr;
	cmnd->SCp.ptr = NULL;

	if (wq)
		wake_up(wq);
}

/*
 *  Scsi_Host_template SCSI host template
 */
/*
 * Scsi_Host template entry, returns BFAD PCI info.
 */
static const char *
bfad_im_info(struct Scsi_Host *shost)
{
	static char     bfa_buf[256];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_s *bfa = &bfad->bfa;
	struct bfa_ioc_s *ioc = &bfa->ioc;
	char model[BFA_ADAPTER_MODEL_NAME_LEN];

	bfa_get_adapter_model(bfa, model);

	memset(bfa_buf, 0, sizeof(bfa_buf));
	if (ioc->ctdev)
		snprintf(bfa_buf, sizeof(bfa_buf),
		"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
		 model, bfad->pci_name, BFAD_DRIVER_VERSION);
	else
		snprintf(bfa_buf, sizeof(bfa_buf),
		"Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
		model, bfad->pci_name, BFAD_DRIVER_VERSION);

	return bfa_buf;
}

/*
 * Scsi_Host template entry, aborts the specified SCSI command.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s         *bfad = im_port->bfad;
	struct bfa_ioim_s *hal_io;
	unsigned long   flags;
	u32        timeout;
	int             rc = FAILED;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
	if (!hal_io) {
		/* IO has been completed, return success */
		rc = SUCCESS;
		goto out;
	}
	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
		rc = FAILED;
		goto out;
	}

	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: abort cmnd %p iotag %x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	(void) bfa_ioim_abort(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Need to wait until the command gets aborted */
	timeout = 10;
	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		if (timeout < 4 * HZ)
			timeout *= 2;
	}

	cmnd->scsi_done(cmnd);
	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: complete abort 0x%p iotag 0x%x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	return SUCCESS;
out:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return rc;
}

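/*
 * Allocate a tskim and issue a target reset TM command to the given
 * itnim; completion is reported through bfa_cb_tskim_done().
 */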
static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
		     struct bfad_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_itnim_s *bfa_itnim;
	bfa_status_t    rc = BFA_STATUS_OK;

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, fail to allocate tskim\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL so a later abort request does not
	 * treat this task management command as an outstanding IO.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
	return rc;
}

/*
 * Scsi_Host template entry, resets a LUN and aborts all its commands.
 *
 * Returns: SUCCESS or FAILED.
 *
 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_s         *bfad = im_port->bfad;
	struct bfa_tskim_s *tskim;
	struct bfad_itnim_s   *itnim;
	struct bfa_itnim_s *bfa_itnim;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	int             rc = SUCCESS;
	unsigned long   flags;
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = itnim_data->itnim;
	if (!itnim) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"LUN reset, fail to allocate tskim");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL so a later abort request does not
	 * treat this task management command as an outstanding IO.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.ptr = (char *)&wq;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	bfa_tskim_start(tskim, bfa_itnim,
			    bfad_int_to_lun(cmnd->device->lun),
			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_event(wq, test_bit(IO_DONE_BIT,
			(unsigned long *)&cmnd->SCp.Status));

	task_status = cmnd->SCp.Status >> 1;
	if (task_status != BFI_TSKIM_STS_OK) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset failure, status: %d\n", task_status);
		rc = FAILED;
	}

out:
	return rc;
}

/*
 * Scsi_Host template entry, resets the bus and aborts all commands.
 */
static int
bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
				(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s         *bfad = im_port->bfad;
	struct bfad_itnim_s   *itnim;
	unsigned long   flags;
	u32        i, rc, err_cnt = 0;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		itnim = bfad_os_get_itnim(im_port, i);
		if (itnim) {
			cmnd->SCp.ptr = (char *)&wq;
			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
			if (rc != BFA_STATUS_OK) {
				err_cnt++;
				continue;
			}

			/* wait for the target reset to complete */
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			wait_event(wq, test_bit(IO_DONE_BIT,
					(unsigned long *)&cmnd->SCp.Status));
			spin_lock_irqsave(&bfad->bfad_lock, flags);

			task_status = cmnd->SCp.Status >> 1;
			if (task_status != BFI_TSKIM_STS_OK) {
				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
					"target reset failure,"
					" status: %d\n", task_status);
				err_cnt++;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (err_cnt)
		return FAILED;

	return SUCCESS;
}

/*
 * Scsi_Host template entry slave_destroy.
 */
static void
bfad_im_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

/*
 *  BFA FCS itnim callbacks
 */

/*
 * BFA FCS itnim alloc callback, after successful PRLI
 * Context: Interrupt
 */
void
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
		    struct bfad_itnim_s **itnim_drv)
{
	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
	if (*itnim_drv == NULL)
		return;

	(*itnim_drv)->im = bfad->im;
	*itnim = &(*itnim_drv)->fcs_itnim;
	(*itnim_drv)->state = ITNIM_STATE_NONE;

	/*
	 * Initialize the itnim_work
	 */
	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}

/*
 * BFA FCS itnim free callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s    *port;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];
	struct bfad_im_s	*im = itnim_drv->im;

	/* online to free state transition should not happen */
	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);

	itnim_drv->queue_work = 1;
	/* offline request is not yet done, use the same request to free */
	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
		itnim_drv->queue_work = 0;

	itnim_drv->state = ITNIM_STATE_FREE;
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->im_port = port->im_port;
	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
	wwn2str(wwpn_str, wwpn);
	fcid2str(fcid_str, fcid);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
		port->im_port->shost->host_no,
		fcid_str, wwpn_str);

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * BFA FCS itnim online callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s    *port;
	struct bfad_im_s	*im = itnim_drv->im;

	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->state = ITNIM_STATE_ONLINE;
	itnim_drv->queue_work = 1;
	itnim_drv->im_port = port->im_port;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * BFA FCS itnim offline callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s    *port;
	struct bfad_s *bfad;
	struct bfad_im_s	*im = itnim_drv->im;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
		 (port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * Allocate a Scsi_Host for a port.
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
			struct device *dev)
{
	int error = 1;

	mutex_lock(&bfad_mutex);
	if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_pre_get failure\n");
		goto out;
	}

	error = idr_get_new(&bfad_im_port_index, im_port,
					 &im_port->idr_id);
	if (error) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_get_new failure\n");
		goto out;
	}

	mutex_unlock(&bfad_mutex);

	im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	im_port->shost->hostdata[0] = (unsigned long)im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		im_port->shost->transportt = bfad_im_scsi_transport_template;
	else
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;
	}

	/* set up fixed host attributes if the kernel supports them */
	bfad_os_fc_host_init(im_port);

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
out:
	return error;
}

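/*
 * Unregister the Scsi_Host from the FC transport and SCSI midlayer and
 * release its IDR index.
 */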
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	bfa_trc(bfad, bfad->inst_no);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
			im_port->shost->host_no);

	fc_remove_host(im_port->shost);

	scsi_remove_host(im_port->shost);
	scsi_host_put(im_port->shost);

	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
}

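/*
 * Port delete work handler: for vports, mark the im_port as being
 * deleted and terminate the FC transport vport.
 */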
static void
bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);

	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		im_port->flags |= BFAD_PORT_DELETE;
		fc_vport_terminate(im_port->fc_vport);
	}
}

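/*
 * Allocate and initialize the IM port structure for a driver port.
 */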
bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
	int             rc = BFA_STATUS_OK;
	struct bfad_im_port_s *im_port;

	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
	if (im_port == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}
	port->im_port = im_port;
	im_port->port = port;
	im_port->bfad = bfad;

	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
	INIT_LIST_HEAD(&im_port->binding_list);

ext:
	return rc;
}

void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
				&im_port->port_delete_work);
}

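/*
 * Free the FCP binding entries of an IM port; the itnim_mapped_list
 * must already be empty at this point.
 */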
void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad =  im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
					list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	bfa_assert(list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

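/*
 * IM module probe: allocate the per-instance IM structure and create
 * its worker thread workqueue.
 */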
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
	struct bfad_im_s      *im;
	bfa_status_t    rc = BFA_STATUS_OK;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (im == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	bfad->im = im;
	im->bfad = bfad;

	if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		rc = BFA_STATUS_FAILED;
	}

ext:
	return rc;
}

void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_os_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}

struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	return scsi_host_alloc(sht, sizeof(unsigned long));
}

void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	if (!(im_port->flags & BFAD_PORT_DELETE))
		flush_workqueue(bfad->im->drv_workq);
	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}

void
bfad_os_destroy_workq(struct bfad_im_s *im)
{
	if (im && im->drv_workq) {
		flush_workqueue(im->drv_workq);
		destroy_workqueue(im->drv_workq);
		im->drv_workq = NULL;
	}
}

bfa_status_t
bfad_os_thread_workq(struct bfad_s *bfad)
{
	struct bfad_im_s      *im = bfad->im;

	bfa_trc(bfad, 0);
	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
		 bfad->inst_no);
	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
	if (!im->drv_workq)
		return BFA_STATUS_FAILED;

	return BFA_STATUS_OK;
}

/*
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Return non-zero if fails.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, bfa_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);

	return 0;
}

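/* SCSI host template for physical (base) ports */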
struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = 0xFFFF,
};

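/* SCSI host template for NPIV vports; differs only in its sysfs attributes */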
struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = 0xFFFF,
};

bfa_status_t
bfad_im_module_init(void)
{
	bfad_im_scsi_transport_template =
		fc_attach_transport(&bfad_im_fc_function_template);
	if (!bfad_im_scsi_transport_template)
		return BFA_STATUS_ENOMEM;

	bfad_im_scsi_vport_transport_template =
		fc_attach_transport(&bfad_im_vport_fc_function_template);
	if (!bfad_im_scsi_vport_transport_template) {
		fc_release_transport(bfad_im_scsi_transport_template);
		return BFA_STATUS_ENOMEM;
	}

	return BFA_STATUS_OK;
}

void
bfad_im_module_exit(void)
{
	if (bfad_im_scsi_transport_template)
		fc_release_transport(bfad_im_scsi_transport_template);

	if (bfad_im_scsi_vport_transport_template)
		fc_release_transport(bfad_im_scsi_vport_transport_template);
}

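/*
 * Ramp the queue depth of every device on this target back up by one,
 * provided BFA_QUEUE_FULL_RAMP_UP_TIME has elapsed since both the last
 * ramp-up and the last queue-full event.
 */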
void
bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	if (((jiffies - itnim->last_ramp_up_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
		((jiffies - itnim->last_queue_full_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth + 1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth + 1);

				itnim->last_ramp_up_time = jiffies;
			}
		}
	}
}

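/*
 * Queue-full handling: record the time and let the midlayer track and
 * reduce the queue depth of every device on this target.
 */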
void
bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	itnim->last_queue_full_time = jiffies;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->id != sdev->id)
			continue;
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

struct bfad_itnim_s *
bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
{
	struct bfad_itnim_s   *itnim = NULL;

	/* Search the mapped list for this target ID */
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		if (id == itnim->scsi_tgt_id)
			return itnim;
	}

	return NULL;
}

/*
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	return 0;
}

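/*
 * Derive the FC transport supported-speed mask from the adapter's
 * maximum speed reported in the IOC attributes.
 */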
static u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 supported_speed = 0;

	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
		if (ioc_attr->adapter_attr.is_mezz) {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
		} else {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT;
		}
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
		supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
				FC_PORTSPEED_1GBIT;
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
		supported_speed |= FC_PORTSPEED_10GBIT;
	}
	kfree(ioc_attr);
	return supported_speed;
}

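/*
 * Initialize the fc_host attributes (node/port names, NPIV vport limit,
 * supported classes, FC4 types, symbolic name, supported speeds and max
 * frame size) from the FCS port and fcport configuration.
 */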
void
bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s         *bfad = im_port->bfad;
	struct bfad_port_s    *port = im_port->port;
	char symname[BFA_SYMNAME_MAXLEN];
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	fc_host_node_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
		BFA_SYMNAME_MAXLEN);
	sprintf(fc_host_symbolic_name(host), "%s", symname);

	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}

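/*
 * Register the itnim with the FC transport as a remote port, set its
 * FCP target role and remember the assigned SCSI target id.
 */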
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	return;
}

/*
 * Work queue handler using FC transport service
 * Context: kernel
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
	struct bfad_itnim_s   *itnim = container_of(work, struct bfad_itnim_s,
							itnim_work);
	struct bfad_im_s      *im = itnim->im;
	struct bfad_s         *bfad = im->bfad;
	struct bfad_im_port_s *im_port;
	unsigned long   flags;
	struct fc_rport *fc_rport;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	im_port = itnim->im_port;
	bfa_trc(bfad, itnim->state);
	switch (itnim->state) {
	case ITNIM_STATE_ONLINE:
		if (!itnim->fc_rport) {
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			bfad_im_fc_rport_add(im_port, itnim);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_add_tail(&itnim->list_entry,
				&im_port->itnim_mapped_list);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM ONLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		} else {
			printk(KERN_WARNING
				"%s: itnim %llx is already in online state\n",
				__func__,
				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
		}

		break;
	case ITNIM_STATE_OFFLINE_PENDING:
		itnim->state = ITNIM_STATE_OFFLINE;
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_del(&itnim->list_entry);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM OFFLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		}
		break;
	case ITNIM_STATE_FREE:
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			list_del(&itnim->list_entry);
		}

		kfree(itnim);
		break;
	default:
		bfa_assert(0);
		break;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * Scsi_Host template entry, queues a SCSI command to the BFAD.
 */
static int
bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s         *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s   *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long   flags;
	int             rc;
	int       sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);
	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmnd->host_scribble = (char *)hal_io;
	bfa_trc_fp(bfad, hal_io->iotag);
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}

static DEF_SCSI_QCMD(bfad_im_queuecommand)

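/*
 * Wait (bounded by bfa_linkup_delay) for the base port to come online,
 * then give remote ports a short window to be discovered before SCSI
 * scanning proceeds.
 */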
void
bfad_os_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		&& i < bfa_linkup_delay; i++) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
			rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			&& i < rport_delay; i++) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}

		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(rport_delay * HZ);
		}
	}
}

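/*
 * Pick the link-up delay: 30 seconds when boot-over-SAN WWNs are found
 * in flash, otherwise no delay for a local boot.
 */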
int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{
	u8		nwwns = 0;
	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int		linkup_delay;

	/*
	 * Querying for the boot target port wwns
	 * -- read from boot information in flash.
	 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
	 * else => local boot machine set linkup_delay = 0
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

	if (nwwns > 0)
		/* If Boot over SAN set linkup_delay = 30sec */
		linkup_delay = 30;
	else
		/* If local boot; no linkup_delay */
		linkup_delay = 0;

	return linkup_delay;
}