/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan.  This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perfom PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization.  Memory allocation requirements "
		"vary by ISP type.  Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging, "
		"Default is 0 - no logging. 1 - log errors.");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

static void qla2x00_free_device(scsi_qla_host_t *);

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH    32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to report for target devices.");

/* Do not change the value of this after module load */
int ql2xenabledif = 1;
module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF "
		" Default is 0 - No DIF Support. 1 - Enable it");

int ql2xenablehba_err_chk;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA"
		" Default is 0 - Error isolation disabled, 1 - Enable it");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xmaxqueues,
		"Enables MQ settings "
		"Default is 1 for single queue. Set it to number "
		"of queues in MQ mode.");

int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xmultique_tag,
		"Enables CPU affinity settings for the driver "
		"Default is 0 for no affinity of request and response IO. "
		"Set it to 1 to turn on the cpu affinity.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:\n"
		" 2 -- load firmware via the request_firmware() (hotplug)\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst."
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xdontresethba,
	"Option to specify reset behaviour\n"
	" 0 (Default) -- Reset on failure.\n"
	" 1 -- Do not reset on failure.\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xtargetreset,
		 "Enable target reset."
		 "Default is 1 - use hw defaults.");

int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device *device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
		void (*fn)(struct scsi_cmnd *));
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);

static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);

struct scsi_host_template qla2xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= QLA2XXX_DRIVER_NAME,
	.queuecommand		= qla2xxx_queuecommand,

	.eh_abort_handler	= qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
	.eh_host_reset_handler	= qla2xxx_eh_host_reset,

	.slave_configure	= qla2xxx_slave_configure,

	.slave_alloc		= qla2xxx_slave_alloc,
	.slave_destroy		= qla2xxx_slave_destroy,
	.scan_finished		= qla2xxx_scan_finished,
	.scan_start		= qla2xxx_scan_start,
	.change_queue_depth	= qla2x00_change_queue_depth,
	.change_queue_type	= qla2x00_change_queue_type,
	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla2x00_host_attrs,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
	init_timer(&vha->timer);
	vha->timer.expires = jiffies + interval * HZ;
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))func;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED)
		return;

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_mem_free(struct qla_hw_data *);
static void qla2x00_sp_free_dma(srb_t *);

/* -------------------------------------------------------------------------- */
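/*
 * Allocate the arrays of request/response queue pointers and mark
 * queue 0 as in use in both qid maps.
 */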
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
{
	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
				GFP_KERNEL);
	if (!ha->req_q_map) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for request queue ptrs\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for response queue ptrs\n");
		goto fail_rsp_map;
	}
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		(req->length + 1) * sizeof(request_t),
		req->ring, req->dma);

	kfree(req);
	req = NULL;
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (rsp && rsp->ring)
		dma_free_coherent(&ha->pdev->dev,
		(rsp->length + 1) * sizeof(response_t),
		rsp->ring, rsp->dma);

	kfree(rsp);
	rsp = NULL;
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;

	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		qla2x00_free_req_que(ha, req);
	}
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		qla2x00_free_rsp_que(ha, rsp);
	}
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

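/*
 * Configure multi-queue (CPU affinity) mode: create an additional
 * request queue for I/O and the per-CPU response queues, falling back
 * to single-queue mode on any failure.
 */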
static int qla25xx_setup_mode(struct scsi_qla_host *vha)
{
	uint16_t options = 0;
	int ques, req, ret;
	struct qla_hw_data *ha = vha->hw;

	if (!(ha->fw_attributes & BIT_6)) {
		qla_printk(KERN_INFO, ha,
			"Firmware is not multi-queue capable\n");
		goto fail;
	}
	if (ql2xmultique_tag) {
		/* create a request queue for IO */
		options |= BIT_7;
		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
			QLA_DEFAULT_QUE_QOS);
		if (!req) {
			qla_printk(KERN_WARNING, ha,
				"Can't create request queue\n");
			goto fail;
		}
		ha->wq = create_workqueue("qla2xxx_wq");
		vha->req = ha->req_q_map[req];
		options |= BIT_1;
		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
			if (!ret) {
				qla_printk(KERN_WARNING, ha,
					"Response Queue create failed\n");
				goto fail2;
			}
		}
		ha->flags.cpu_affinity_enabled = 1;

		DEBUG2(qla_printk(KERN_INFO, ha,
			"CPU affinity mode enabled, no. of response"
			" queues:%d, no. of request queues:%d\n",
			ha->max_rsp_queues, ha->max_req_queues));
	}
	return 0;
fail2:
	qla25xx_delete_queues(vha);
	destroy_workqueue(ha->wq);
	ha->wq = NULL;
fail:
	ha->mqenable = 0;
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->max_req_queues = ha->max_rsp_queues = 1;
	return 1;
}

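/*
 * Build a human-readable description of the PCI/PCI-X bus mode and
 * speed (in MHz) for ISP2xxx adapters.
 */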
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
		lwidth = (pcie_lstat &
		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;

		strcpy(str, "PCIe (");
		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

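/*
 * Format the firmware version string, appending a short code derived
 * from the firmware attribute bits.
 */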
static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;
L
	    ha->fw_minor_version,
	    ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

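/*
 * Allocate an srb from the mempool and initialize it for the given
 * fcport/command pair; returns NULL if the pool is exhausted.
 */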
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
    struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!sp)
		return sp;

	atomic_set(&sp->ref_count, 1);
	sp->fcport = fcport;
	sp->cmd = cmd;
	sp->flags = 0;
	CMD_SP(cmd) = (void *)sp;
	cmd->scsi_done = done;
	sp->ctx = NULL;

	return sp;
}

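/*
 * Mid-layer queuecommand entry point: validate the rport/fcport state,
 * allocate an srb and hand the command to the ISP-specific start_scsi
 * routine, dropping the host lock around the submission.
 */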
static int
qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure)
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_REQUEUE << 16;
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		goto qc24_fail_command;
	}

	/* Close window on fcport/rport state-transitioning. */
	if (fcport->drport)
		goto qc24_target_busy;

	if (!vha->flags.difdix_supported &&
		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
			DEBUG2(qla_printk(KERN_ERR, ha,
			    "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
			    cmd->cmnd[0]));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
	}
	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	spin_unlock_irq(vha->host->host_lock);

	sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
	if (!sp)
		goto qc24_host_busy_lock;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS)
		goto qc24_host_busy_free_sp;

	spin_lock_irq(vha->host->host_lock);

	return 0;

qc24_host_busy_free_sp:
	qla2x00_sp_free_dma(sp);
	mempool_free(sp, ha->srb_mempool);

qc24_host_busy_lock:
	spin_lock_irq(vha->host->host_lock);
	return SCSI_MLQUEUE_HOST_BUSY;

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	done(cmd);

	return 0;
}


L
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait until the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT  or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int		return_status;
	unsigned long	wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

/*
 * qla2x00_wait_for_reset_ready
 *    Wait until the HBA is online after going through at most
 *    MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is
 *    finally disabled (i.e. marked offline) or flash
 *    operations are in progress.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online/no flash ops) : 0
 *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
 */
static int
qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
{
	int		return_status;
	unsigned long	wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->optrom_state != QLA_SWAITING ||
	    ha->dpc_active) && time_before(jiffies, wait_online))
		msleep(1000);

	if (base_vha->flags.online &&  ha->optrom_state == QLA_SWAITING)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	DEBUG2(printk("%s return_status=%d\n", __func__, return_status));

	return return_status;
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int		return_status;
	unsigned long	wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

/*
 * qla2x00_wait_for_loop_ready
 *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
 *    to be in LOOP_READY state.
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 *
 * Return:
 *    Success (LOOP_READY) : 0
 *    Failed  (LOOP_NOT_READY) : 1
 */
static inline int
qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
{
	int	return_status = QLA_SUCCESS;
	unsigned long loop_timeout;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Wait at most 5 minutes for the loop to become ready. */
	loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);

	while ((!atomic_read(&base_vha->loop_down_timer) &&
	    atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
	    atomic_read(&base_vha->loop_state) != LOOP_READY) {
		if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			return_status = QLA_FUNCTION_FAILED;
			break;
		}
		msleep(1000);
		if (time_after_eq(jiffies, loop_timeout)) {
			return_status = QLA_FUNCTION_FAILED;
			break;
		}
	}
	return (return_status);
}

static void
sp_get(struct srb *sp)
{
	atomic_inc(&sp->ref_count);
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	srb_t *sp;
	int ret, i;
	unsigned int id, lun;
	unsigned long serial;
	unsigned long flags;
	int wait = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
	srb_t *spt;
	int got_ref = 0;

	fc_block_scsi_eh(cmd);

	if (!CMD_SP(cmd))
		return SUCCESS;

	ret = SUCCESS;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;
	spt = (srb_t *) CMD_SP(cmd);
	if (!spt)
		return SUCCESS;

	/* Check the active list for the command. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
		sp = req->outstanding_cmds[i];

		if (sp == NULL)
			continue;
		if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
		    !IS_PROT_IO(sp))
			continue;
		if (sp->cmd != cmd)
			continue;

		DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
		" pid=%ld.\n", __func__, vha->host_no, sp, serial));

		/* Get a reference to the sp and drop the lock.*/
		sp_get(sp);
		got_ref++;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if (ha->isp_ops->abort_command(sp)) {
			DEBUG2(printk("%s(%ld): abort_command "
			"mbx failed.\n", __func__, vha->host_no));
			ret = FAILED;
		} else {
			DEBUG3(printk("%s(%ld): abort_command "
			"mbx success.\n", __func__, vha->host_no));
			wait = 1;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait for the command to be returned. */
	if (wait) {
		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
			qla_printk(KERN_ERR, ha,
			    "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
			    "%x.\n", vha->host_no, id, lun, serial, ret);
			ret = FAILED;
		}
	}

	if (got_ref)
		qla2x00_sp_compl(ha, sp);

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
	    vha->host_no, id, lun, wait, serial, ret);

	return ret;
}

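/*
 * Wait for all outstanding commands on the given nexus (host, target
 * or LUN, as selected by the nexus_wait_type) to be returned by the
 * firmware.
 */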
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	unsigned int l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
		cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if ((sp->ctx) && !IS_PROT_IO(sp))
			continue;
		if (vha->vp_idx != sp->fcport->vha->vp_idx)
			continue;
		match = 0;
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = sp->cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (sp->cmd->device->id == t &&
				sp->cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(sp->cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

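/*
 * Common body for the device/target reset handlers: wait for the HBA
 * and loop to be ready, issue the supplied reset callback, then wait
 * for the affected commands to complete.
 */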
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	fc_block_scsi_eh(cmd);

	if (!fcport)
		return FAILED;

	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
	    vha->host_no, cmd->device->id, cmd->device->lun, name);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		goto eh_reset_failed;
	err = 1;
	if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
		goto eh_reset_failed;
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
		!= QLA_SUCCESS)
		goto eh_reset_failed;
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS)
		goto eh_reset_failed;

	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
	    vha->host_no, cmd->device->id, cmd->device->lun, name);

	return SUCCESS;

eh_reset_failed:
	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
	    , vha->host_no, cmd->device->id, cmd->device->lun, name,
	    reset_errors[err]);
	return FAILED;
}

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that cause the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id, lun;
	unsigned long serial;

	fc_block_scsi_eh(cmd);

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	if (!fcport)
		return ret;

	qla_printk(KERN_INFO, vha->hw,
	    "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		DEBUG2(printk("%s failed: board disabled\n", __func__));
		goto eh_bus_reset_done;
	}

	if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
		if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
			ret = SUCCESS;
	}
	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS)
		ret = FAILED;

eh_bus_reset_done:
	qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*      cmd = Linux SCSI command packet of the command that cause the
*            adapter reset.
*
* Returns:
*      Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id, lun;
	unsigned long serial;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	fc_block_scsi_eh(cmd);

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	if (!fcport)
		return ret;

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
		goto eh_host_reset_lock;

	/*
	 * FIXME: the dpc thread may be active and processing a
	 * loop_resync, so wait a while for it to
	 * complete before issuing the big hammer. Otherwise
	 * it may cause I/O failure, as the big hammer marks the
	 * devices as lost, kicking off the port_down_timer
	 * while the dpc is stuck waiting for the mailbox to complete.
	 */
	qla2x00_wait_for_loop_ready(vha);
	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_QLA82XX(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
				goto eh_host_reset_lock;
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Wait for commands to be returned to the OS. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
		QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
				    "target_reset=%d d_id=%x.\n", __func__,
				    vha->host_no, ret, fcport->d_id.b24));
			}
		}
	}

	if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			DEBUG2_3(printk("%s(%ld): failed: "
			    "full_login_lip=%d.\n", __func__, vha->host_no,
			    ret));
		}
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha, 0);
		qla2x00_wait_for_loop_ready(vha);
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS) {
			DEBUG2_3(printk("%s(%ld): failed: "
			    "lip_reset=%d.\n", __func__, vha->host_no, ret));
		} else
			qla2x00_wait_for_loop_ready(vha);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

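/*
 * Complete every outstanding command on all request queues with the
 * given result, releasing any associated SRB contexts.
 */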
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que, cnt;
	unsigned long flags;
	srb_t *sp;
	struct srb_ctx *ctx;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				req->outstanding_cmds[cnt] = NULL;
				if (!sp->ctx ||
					(sp->flags & SRB_FCP_CMND_DMA_VALID) ||
					IS_PROT_IO(sp)) {
					sp->cmd->result = res;
					qla2x00_sp_compl(ha, sp);
				} else {
					ctx = sp->ctx;
					if (ctx->type == SRB_LOGIN_CMD ||
					    ctx->type == SRB_LOGOUT_CMD) {
						ctx->u.iocb_cmd->free(sp);
					} else {
						struct fc_bsg_job *bsg_job =
						    ctx->u.bsg_job;
						if (bsg_job->request->msgcode
						    == FC_BSG_HST_CT)
							kfree(sp->fcport);
						bsg_job->req->errors = 0;
						bsg_job->reply->result = res;
						bsg_job->job_done(bsg_job);
						kfree(sp->ctx);
						mempool_free(sp,
							ha->srb_mempool);
					}
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, req->max_q_depth);
	else
		scsi_deactivate_tcq(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

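/*
 * Queue-depth helpers: ramp the sdev queue depth down on QUEUE FULL
 * conditions and back up again when the mid-layer requests it.
 */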
static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;

	if (!scsi_track_queue_full(sdev, qdepth))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
		"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
		sdev->queue_depth));
}

static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = sdev->hostdata;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;

	req = vha->req;
	if (!req)
		return;

	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);

	DEBUG2(qla_printk(KERN_INFO, ha,
	       "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	       fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
	       sdev->queue_depth));
}

static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		qla2x00_handle_queue_full(sdev, qdepth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return sdev->queue_depth;
}

static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

L
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
 * supported addressing method.
 */
static void
1394
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
L
1396
	/* Assume a 32bit DMA mask. */
L

1399
	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1400 1401
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1402
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1403
			/* Ok, a 64bit DMA mask is applicable. */
L
1405 1406
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1407
			return;
L
	}
1410

1411 1412
	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
L

1415
static void
1416
qla2x00_enable_intrs(struct qla_hw_data *ha)
1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

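/*
 * Per-ISP dispatch tables: each adapter family gets its own set of
 * chip, firmware, NVRAM, flash and I/O entry points.
 */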
static struct isp_operations qla2100_isp_ops = {
	.pci_config		= qla2100_pci_config,
	.reset_chip		= qla2x00_reset_chip,
	.chip_diag		= qla2x00_chip_diag,
	.config_rings		= qla2x00_config_rings,
	.reset_adapter		= qla2x00_reset_adapter,
	.nvram_config		= qla2x00_nvram_config,
	.update_fw_options	= qla2x00_update_fw_options,
	.load_risc		= qla2x00_load_risc,
	.pci_info_str		= qla2x00_pci_info_str,
	.fw_version_str		= qla2x00_fw_version_str,
	.intr_handler		= qla2100_intr_handler,
	.enable_intrs		= qla2x00_enable_intrs,
	.disable_intrs		= qla2x00_disable_intrs,
	.abort_command		= qla2x00_abort_command,
1487 1488
	.target_reset		= qla2x00_abort_target,
	.lun_reset		= qla2x00_lun_reset,
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503
	.fabric_login		= qla2x00_login_fabric,
	.fabric_logout		= qla2x00_fabric_logout,
	.calc_req_entries	= qla2x00_calc_iocbs_32,
	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
	.read_nvram		= qla2x00_read_nvram_data,
	.write_nvram		= qla2x00_write_nvram_data,
	.fw_dump		= qla2100_fw_dump,
	.beacon_on		= NULL,
	.beacon_off		= NULL,
	.beacon_blink		= NULL,
	.read_optrom		= qla2x00_read_optrom_data,
	.write_optrom		= qla2x00_write_optrom_data,
	.get_flash_version	= qla2x00_get_flash_version,
1504
	.start_scsi		= qla2x00_start_scsi,
1505
	.abort_isp		= qla2x00_abort_isp,
1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522
};

static struct isp_operations qla2300_isp_ops = {
	.pci_config		= qla2300_pci_config,
	.reset_chip		= qla2x00_reset_chip,
	.chip_diag		= qla2x00_chip_diag,
	.config_rings		= qla2x00_config_rings,
	.reset_adapter		= qla2x00_reset_adapter,
	.nvram_config		= qla2x00_nvram_config,
	.update_fw_options	= qla2x00_update_fw_options,
	.load_risc		= qla2x00_load_risc,
	.pci_info_str		= qla2x00_pci_info_str,
	.fw_version_str		= qla2x00_fw_version_str,
	.intr_handler		= qla2300_intr_handler,
	.enable_intrs		= qla2x00_enable_intrs,
	.disable_intrs		= qla2x00_disable_intrs,
	.abort_command		= qla2x00_abort_command,
1523 1524
	.target_reset		= qla2x00_abort_target,
	.lun_reset		= qla2x00_lun_reset,
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539
	.fabric_login		= qla2x00_login_fabric,
	.fabric_logout		= qla2x00_fabric_logout,
	.calc_req_entries	= qla2x00_calc_iocbs_32,
	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
	.read_nvram		= qla2x00_read_nvram_data,
	.write_nvram		= qla2x00_write_nvram_data,
	.fw_dump		= qla2300_fw_dump,
	.beacon_on		= qla2x00_beacon_on,
	.beacon_off		= qla2x00_beacon_off,
	.beacon_blink		= qla2x00_beacon_blink,
	.read_optrom		= qla2x00_read_optrom_data,
	.write_optrom		= qla2x00_write_optrom_data,
	.get_flash_version	= qla2x00_get_flash_version,
1540
	.start_scsi		= qla2x00_start_scsi,
1541
	.abort_isp		= qla2x00_abort_isp,
1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558
};

static struct isp_operations qla24xx_isp_ops = {
	.pci_config		= qla24xx_pci_config,
	.reset_chip		= qla24xx_reset_chip,
	.chip_diag		= qla24xx_chip_diag,
	.config_rings		= qla24xx_config_rings,
	.reset_adapter		= qla24xx_reset_adapter,
	.nvram_config		= qla24xx_nvram_config,
	.update_fw_options	= qla24xx_update_fw_options,
	.load_risc		= qla24xx_load_risc,
	.pci_info_str		= qla24xx_pci_info_str,
	.fw_version_str		= qla24xx_fw_version_str,
	.intr_handler		= qla24xx_intr_handler,
	.enable_intrs		= qla24xx_enable_intrs,
	.disable_intrs		= qla24xx_disable_intrs,
	.abort_command		= qla24xx_abort_command,
1559 1560
	.target_reset		= qla24xx_abort_target,
	.lun_reset		= qla24xx_lun_reset,
1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575
	.fabric_login		= qla24xx_login_fabric,
	.fabric_logout		= qla24xx_fabric_logout,
	.calc_req_entries	= NULL,
	.build_iocbs		= NULL,
	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
	.read_nvram		= qla24xx_read_nvram_data,
	.write_nvram		= qla24xx_write_nvram_data,
	.fw_dump		= qla24xx_fw_dump,
	.beacon_on		= qla24xx_beacon_on,
	.beacon_off		= qla24xx_beacon_off,
	.beacon_blink		= qla24xx_beacon_blink,
	.read_optrom		= qla24xx_read_optrom_data,
	.write_optrom		= qla24xx_write_optrom_data,
	.get_flash_version	= qla24xx_get_flash_version,
1576
	.start_scsi		= qla24xx_start_scsi,
1577
	.abort_isp		= qla2x00_abort_isp,
1578 1579
};

1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594
static struct isp_operations qla25xx_isp_ops = {
	.pci_config		= qla25xx_pci_config,
	.reset_chip		= qla24xx_reset_chip,
	.chip_diag		= qla24xx_chip_diag,
	.config_rings		= qla24xx_config_rings,
	.reset_adapter		= qla24xx_reset_adapter,
	.nvram_config		= qla24xx_nvram_config,
	.update_fw_options	= qla24xx_update_fw_options,
	.load_risc		= qla24xx_load_risc,
	.pci_info_str		= qla24xx_pci_info_str,
	.fw_version_str		= qla24xx_fw_version_str,
	.intr_handler		= qla24xx_intr_handler,
	.enable_intrs		= qla24xx_enable_intrs,
	.disable_intrs		= qla24xx_disable_intrs,
	.abort_command		= qla24xx_abort_command,
1595 1596
	.target_reset		= qla24xx_abort_target,
	.lun_reset		= qla24xx_lun_reset,
1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
	.fabric_login		= qla24xx_login_fabric,
	.fabric_logout		= qla24xx_fabric_logout,
	.calc_req_entries	= NULL,
	.build_iocbs		= NULL,
	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
	.read_nvram		= qla25xx_read_nvram_data,
	.write_nvram		= qla25xx_write_nvram_data,
	.fw_dump		= qla25xx_fw_dump,
	.beacon_on		= qla24xx_beacon_on,
	.beacon_off		= qla24xx_beacon_off,
	.beacon_blink		= qla24xx_beacon_blink,
1609
	.read_optrom		= qla25xx_read_optrom_data,
1610 1611
	.write_optrom		= qla24xx_write_optrom_data,
	.get_flash_version	= qla24xx_get_flash_version,
1612
	.start_scsi		= qla24xx_dif_start_scsi,
1613
	.abort_isp		= qla2x00_abort_isp,
1614 1615
};

1616 1617 1618 1619 1620 1621 1622 1623
static struct isp_operations qla81xx_isp_ops = {
	.pci_config		= qla25xx_pci_config,
	.reset_chip		= qla24xx_reset_chip,
	.chip_diag		= qla24xx_chip_diag,
	.config_rings		= qla24xx_config_rings,
	.reset_adapter		= qla24xx_reset_adapter,
	.nvram_config		= qla81xx_nvram_config,
	.update_fw_options	= qla81xx_update_fw_options,
1624
	.load_risc		= qla81xx_load_risc,
1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638
	.pci_info_str		= qla24xx_pci_info_str,
	.fw_version_str		= qla24xx_fw_version_str,
	.intr_handler		= qla24xx_intr_handler,
	.enable_intrs		= qla24xx_enable_intrs,
	.disable_intrs		= qla24xx_disable_intrs,
	.abort_command		= qla24xx_abort_command,
	.target_reset		= qla24xx_abort_target,
	.lun_reset		= qla24xx_lun_reset,
	.fabric_login		= qla24xx_login_fabric,
	.fabric_logout		= qla24xx_fabric_logout,
	.calc_req_entries	= NULL,
	.build_iocbs		= NULL,
	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
1639 1640
	.read_nvram		= NULL,
	.write_nvram		= NULL,
1641 1642 1643 1644 1645 1646 1647
	.fw_dump		= qla81xx_fw_dump,
	.beacon_on		= qla24xx_beacon_on,
	.beacon_off		= qla24xx_beacon_off,
	.beacon_blink		= qla24xx_beacon_blink,
	.read_optrom		= qla25xx_read_optrom_data,
	.write_optrom		= qla24xx_write_optrom_data,
	.get_flash_version	= qla24xx_get_flash_version,
1648
	.start_scsi		= qla24xx_dif_start_scsi,
1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
	.abort_isp		= qla2x00_abort_isp,
};

static struct isp_operations qla82xx_isp_ops = {
	.pci_config		= qla82xx_pci_config,
	.reset_chip		= qla82xx_reset_chip,
	.chip_diag		= qla24xx_chip_diag,
	.config_rings		= qla82xx_config_rings,
	.reset_adapter		= qla24xx_reset_adapter,
	.nvram_config		= qla81xx_nvram_config,
	.update_fw_options	= qla24xx_update_fw_options,
	.load_risc		= qla82xx_load_risc,
	.pci_info_str		= qla82xx_pci_info_str,
	.fw_version_str		= qla24xx_fw_version_str,
	.intr_handler		= qla82xx_intr_handler,
	.enable_intrs		= qla82xx_enable_intrs,
	.disable_intrs		= qla82xx_disable_intrs,
	.abort_command		= qla24xx_abort_command,
	.target_reset		= qla24xx_abort_target,
	.lun_reset		= qla24xx_lun_reset,
	.fabric_login		= qla24xx_login_fabric,
	.fabric_logout		= qla24xx_fabric_logout,
	.calc_req_entries	= NULL,
	.build_iocbs		= NULL,
	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
	.read_nvram		= qla24xx_read_nvram_data,
	.write_nvram		= qla24xx_write_nvram_data,
	.fw_dump		= qla24xx_fw_dump,
	.beacon_on		= qla24xx_beacon_on,
	.beacon_off		= qla24xx_beacon_off,
	.beacon_blink		= qla24xx_beacon_blink,
	.read_optrom		= qla82xx_read_optrom_data,
	.write_optrom		= qla82xx_write_optrom_data,
	.get_flash_version	= qla24xx_get_flash_version,
	.start_scsi             = qla82xx_start_scsi,
	.abort_isp		= qla82xx_abort_isp,
};

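/*
 * Derive device-type flags, firmware load address and physical port
 * number from the PCI device ID.
 */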
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
	ha->device_type = DT_EXTENDED_IDS;
	switch (ha->pdev->device) {
	case PCI_DEVICE_ID_QLOGIC_ISP2100:
		ha->device_type |= DT_ISP2100;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2200:
		ha->device_type |= DT_ISP2200;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2300:
		ha->device_type |= DT_ISP2300;
1705
		ha->device_type |= DT_ZIO_SUPPORTED;
1706
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1707 1708 1709
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2312:
		ha->device_type |= DT_ISP2312;
1710
		ha->device_type |= DT_ZIO_SUPPORTED;
1711
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1712 1713 1714
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2322:
		ha->device_type |= DT_ISP2322;
1715
		ha->device_type |= DT_ZIO_SUPPORTED;
1716 1717 1718
		if (ha->pdev->subsystem_vendor == 0x1028 &&
		    ha->pdev->subsystem_device == 0x0170)
			ha->device_type |= DT_OEM_001;
1719
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1720 1721 1722
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6312:
		ha->device_type |= DT_ISP6312;
1723
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1724 1725 1726
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6322:
		ha->device_type |= DT_ISP6322;
1727
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1728 1729 1730
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2422:
		ha->device_type |= DT_ISP2422;
1731
		ha->device_type |= DT_ZIO_SUPPORTED;
1732
		ha->device_type |= DT_FWI2;
1733
		ha->device_type |= DT_IIDMA;
1734
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1735 1736 1737
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2432:
		ha->device_type |= DT_ISP2432;
1738
		ha->device_type |= DT_ZIO_SUPPORTED;
1739
		ha->device_type |= DT_FWI2;
1740
		ha->device_type |= DT_IIDMA;
1741
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1742
		break;
1743 1744 1745 1746 1747 1748 1749
	case PCI_DEVICE_ID_QLOGIC_ISP8432:
		ha->device_type |= DT_ISP8432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
1750 1751
	case PCI_DEVICE_ID_QLOGIC_ISP5422:
		ha->device_type |= DT_ISP5422;
1752
		ha->device_type |= DT_FWI2;
1753
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1754
		break;
1755 1756
	case PCI_DEVICE_ID_QLOGIC_ISP5432:
		ha->device_type |= DT_ISP5432;
1757
		ha->device_type |= DT_FWI2;
1758
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1759
		break;
1760 1761 1762 1763 1764
	case PCI_DEVICE_ID_QLOGIC_ISP2532:
		ha->device_type |= DT_ISP2532;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
1765
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1766
		break;
1767 1768 1769 1770 1771 1772 1773
	case PCI_DEVICE_ID_QLOGIC_ISP8001:
		ha->device_type |= DT_ISP8001;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
1774 1775 1776 1777 1778 1779 1780 1781
	case PCI_DEVICE_ID_QLOGIC_ISP8021:
		ha->device_type |= DT_ISP8021;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
1782
	}
1783

1784 1785 1786 1787 1788 1789
	if (IS_QLA82XX(ha))
		ha->port_no = !(ha->portnum & 1);
	else
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);

1790 1791 1792 1793
	if (ha->port_no & 1)
		ha->flags.port0 = 1;
	else
		ha->flags.port0 = 0;
1794 1795
}

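/*
 * qla2x00_iospace_config() -- reserve the PCI regions, map the optional
 * PIO BAR and the MMIO register BAR, and size the request/response queue
 * and MSI-X vector resources for multiqueue-capable ISPs (BAR 3).
 */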
static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;
	int cpus;

	if (IS_QLA82XX(ha))
		return qla82xx_iospace_config(ha);

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve PIO/MMIO regions (%s)\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			qla_printk(KERN_WARNING, ha,
			    "Invalid PCI I/O region size (%s)...\n",
				pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		qla_printk(KERN_WARNING, ha,
		    "region #0 not a PIO resource (%s)...\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
		    "region #1 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		qla_printk(KERN_ERR, ha,
		    "Invalid PCI mem region size (%s), aborting\n",
			pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		qla_printk(KERN_ERR, ha,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
		(ql2xmaxqueues > 1 && ql2xmultique_tag) ||
		(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
			pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix;
		/* Max queues are bounded by available msix vectors */
		/* queue 0 uses two msix vectors */
		if (ql2xmultique_tag) {
			cpus = num_online_cpus();
			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
				(cpus + 1) : (ha->msix_count - 1);
			ha->max_req_queues = 2;
		} else if (ql2xmaxqueues > 1) {
			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
						QLA_MQ_SIZE : ql2xmaxqueues;
			DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
			" of request queues:%d\n", ha->max_req_queues));
		}
		qla_printk(KERN_INFO, ha,
			"MSI-X vector count: %d\n", msix);
	} else
		qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");

mqiobase_exit:
	ha->msix_count = ha->max_rsp_queues + 1;
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}

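/*
 * Asynchronous-scan hooks used by the SCSI midlayer: scan_start flags a
 * loop resync for the DPC thread, scan_finished reports completion once
 * the loop is ready or the loop_reset_delay window has expired.
 */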
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}

/*
 * PCI driver interface
 */
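/*
 * qla2x00_probe_one() -- PCI probe entry point: enables the device,
 * allocates the qla_hw_data and base scsi_qla_host, applies ISP-specific
 * parameters, initializes the adapter and registers the SCSI host.
 */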
static int __devinit
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int	ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30];
	struct scsi_host_template *sht;
	int bars, max_id, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		mem_only = 1;
	}

	if (mem_only) {
		if (pci_enable_device_mem(pdev))
			goto probe_out;
	} else {
		if (pci_enable_device(pdev))
			goto probe_out;
	}

	/* This may fail but that's ok */
	pci_enable_pcie_error_reporting(pdev);

	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
	if (!ha) {
		DEBUG(printk("Unable to allocate memory for ha\n"));
		goto probe_out;
	}
	ha->pdev = pdev;

	/* Clear our data area */
	ha->bars = bars;
	ha->mem_only = mem_only;
	spin_lock_init(&ha->hardware_lock);

	/* Set ISP-type information. */
	qla2x00_set_isp_flags(ha);

	/* Set EEH reset type to fundamental if required by hba */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
		pdev->needs_freset = 1;
	}

	/* Configure PCI I/O space */
	ret = qla2x00_iospace_config(ha);
	if (ret)
		goto probe_hw_failed;

	qla_printk(KERN_INFO, ha,
	    "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
	    ha->iobase);
	ha->prev_topology = 0;
1996
	ha->init_cb_size = sizeof(init_cb_t);
1997
	ha->link_data_rate = PORT_SPEED_UNKNOWN;
1998
	ha->optrom_size = OPTROM_SIZE_2300;
L
Linus Torvalds 已提交
1999

2000
	/* Assign ISP specific operations. */
2001
	max_id = MAX_TARGETS_2200;
L
Linus Torvalds 已提交
2002
	if (IS_QLA2100(ha)) {
2003
		max_id = MAX_TARGETS_2100;
L
Linus Torvalds 已提交
2004
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
2005 2006 2007
		req_length = REQUEST_ENTRY_CNT_2100;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2008
		ha->gid_list_info_size = 4;
2009 2010 2011 2012
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
2013
		ha->isp_ops = &qla2100_isp_ops;
L
Linus Torvalds 已提交
2014 2015
	} else if (IS_QLA2200(ha)) {
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
2016 2017 2018
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2019
		ha->gid_list_info_size = 4;
2020 2021 2022 2023
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
2024
		ha->isp_ops = &qla2100_isp_ops;
2025
	} else if (IS_QLA23XX(ha)) {
L
Linus Torvalds 已提交
2026
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
2027 2028 2029
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2030
		ha->gid_list_info_size = 6;
2031 2032
		if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->optrom_size = OPTROM_SIZE_2322;
2033 2034 2035 2036
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
2037
		ha->isp_ops = &qla2300_isp_ops;
2038
	} else if (IS_QLA24XX_TYPE(ha)) {
2039
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
2040 2041 2042
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2043
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2044
		ha->gid_list_info_size = 8;
2045
		ha->optrom_size = OPTROM_SIZE_24XX;
2046
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
2047
		ha->isp_ops = &qla24xx_isp_ops;
2048 2049 2050 2051
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2052 2053
	} else if (IS_QLA25XX(ha)) {
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
2054 2055 2056
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2057 2058 2059
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_25XX;
2060
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2061
		ha->isp_ops = &qla25xx_isp_ops;
2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA81XX(ha)) {
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_81XX;
2074
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2075 2076 2077 2078 2079
		ha->isp_ops = &qla81xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092
	} else if (IS_QLA82XX(ha)) {
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_82XX;
		ha->isp_ops = &qla82xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
L
Linus Torvalds 已提交
2093 2094
	}

2095
	mutex_init(&ha->vport_lock);
2096 2097 2098
	init_completion(&ha->mbx_cmd_comp);
	complete(&ha->mbx_cmd_comp);
	init_completion(&ha->mbx_intr_comp);
2099
	init_completion(&ha->dcbx_comp);
L
Linus Torvalds 已提交
2100

2101
	set_bit(0, (unsigned long *) ha->vp_idx_map);
L
Linus Torvalds 已提交
2102

2103
	qla2x00_config_dma_addressing(ha);
2104
	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2105
	if (!ret) {
L
Linus Torvalds 已提交
2106 2107 2108
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Failed to allocate memory for adapter\n");

2109 2110 2111
		goto probe_hw_failed;
	}

2112
	req->max_q_depth = MAX_Q_DEPTH;
2113
	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
2114 2115
		req->max_q_depth = ql2xmaxqdepth;

2116 2117 2118 2119 2120 2121

	base_vha = qla2x00_create_host(sht, ha);
	if (!base_vha) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Failed to allocate memory for scsi_host\n");

2122
		ret = -ENOMEM;
2123
		qla2x00_mem_free(ha);
2124 2125
		qla2x00_free_req_que(ha, req);
		qla2x00_free_rsp_que(ha, rsp);
2126
		goto probe_hw_failed;
L
Linus Torvalds 已提交
2127 2128
	}

2129 2130 2131
	pci_set_drvdata(pdev, base_vha);

	host = base_vha->host;
2132
	base_vha->req = req;
2133 2134
	host->can_queue = req->length + 128;
	if (IS_QLA2XXX_MIDTYPE(ha))
2135
		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2136
	else
2137 2138
		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
						base_vha->vp_idx;
2139 2140 2141 2142 2143 2144 2145 2146 2147 2148

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}

2149 2150 2151 2152
	host->max_id = max_id;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;
2153 2154 2155 2156
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
2157 2158 2159
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = MAX_LUNS;
	host->transportt = qla2xxx_transport_template;
2160
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2161

2162 2163 2164
	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
2165
		goto probe_init_failed;
2166 2167 2168

	pci_save_state(pdev);

2169
	/* Alloc arrays of request and response ring ptrs */
2170
que_init:
2171 2172 2173 2174
	if (!qla2x00_alloc_queues(ha)) {
		qla_printk(KERN_WARNING, ha,
		"[ERROR] Failed to allocate memory for queue"
		" pointers\n");
2175
		goto probe_init_failed;
2176
	}
2177

2178 2179
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
2180 2181 2182 2183
	rsp->req = req;
	req->rsp = rsp;
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);
2184 2185 2186 2187 2188
	/* FWI2-capable only. */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2189
	if (ha->mqenable) {
2190 2191 2192 2193
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
2194 2195
	}

2196 2197 2198 2199 2200 2201
	if (IS_QLA82XX(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}

2202
	if (qla2x00_initialize_adapter(base_vha)) {
L
Linus Torvalds 已提交
2203 2204 2205 2206 2207
		qla_printk(KERN_WARNING, ha,
		    "Failed to initialize adapter\n");

		DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
		    "Adapter flags %x.\n",
2208
		    base_vha->host_no, base_vha->device_flags));
L
Linus Torvalds 已提交
2209

2210 2211 2212 2213 2214 2215 2216 2217
		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA82XX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
		}

2218
		ret = -ENODEV;
L
Linus Torvalds 已提交
2219 2220 2221
		goto probe_failed;
	}

2222 2223
	if (ha->mqenable) {
		if (qla25xx_setup_mode(base_vha)) {
2224 2225 2226
			qla_printk(KERN_WARNING, ha,
				"Can't create queues, falling back to single"
				" queue mode\n");
2227 2228 2229
			goto que_init;
		}
	}
2230

2231 2232 2233
	if (ha->flags.running_gold_fw)
		goto skip_dpc;

L
Linus Torvalds 已提交
2234 2235 2236
	/*
	 * Startup the kernel thread for this host adapter
	 */
2237
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2238
			"%s_dpc", base_vha->host_str);
2239
	if (IS_ERR(ha->dpc_thread)) {
L
Linus Torvalds 已提交
2240 2241
		qla_printk(KERN_WARNING, ha,
		    "Unable to start DPC thread!\n");
2242
		ret = PTR_ERR(ha->dpc_thread);
L
Linus Torvalds 已提交
2243 2244 2245
		goto probe_failed;
	}

2246
skip_dpc:
2247 2248
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;
L
Linus Torvalds 已提交
2249 2250

	/* Initialized the timer */
2251
	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
L
Linus Torvalds 已提交
2252 2253

	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2254
	    base_vha->host_no, ha));
2255

2256
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2257 2258 2259 2260 2261 2262 2263
		if (ha->fw_attributes & BIT_4) {
			base_vha->flags.difdix_supported = 1;
			DEBUG18(qla_printk(KERN_INFO, ha,
			    "Registering for DIF/DIX type 1 and 3"
			    " protection.\n"));
			scsi_host_set_prot(host,
			    SHOST_DIF_TYPE1_PROTECTION
2264
			    | SHOST_DIF_TYPE2_PROTECTION
2265 2266
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
2267
			    | SHOST_DIX_TYPE2_PROTECTION
2268 2269 2270 2271 2272 2273
			    | SHOST_DIX_TYPE3_PROTECTION);
			scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
		} else
			base_vha->flags.difdix_supported = 0;
	}

2274 2275
	ha->isp_ops->enable_intrs(ha);

2276 2277 2278 2279
	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

2280 2281 2282
	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;

2283 2284
	scsi_scan_host(host);

2285
	qla2x00_alloc_sysfs_attr(base_vha);
2286

2287
	qla2x00_init_host_attr(base_vha);
2288

2289
	qla2x00_dfs_setup(base_vha);
2290

L
Linus Torvalds 已提交
2291 2292 2293
	qla_printk(KERN_INFO, ha, "\n"
	    " QLogic Fibre Channel HBA Driver: %s\n"
	    "  QLogic %s - %s\n"
2294 2295
	    "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
	    qla2x00_version_str, ha->model_number,
2296 2297 2298 2299
	    ha->model_desc ? ha->model_desc : "", pdev->device,
	    ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
	    ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str));
L
Linus Torvalds 已提交
2300 2301 2302

	return 0;

2303
probe_init_failed:
2304 2305 2306
	qla2x00_free_req_que(ha, req);
	qla2x00_free_rsp_que(ha, rsp);
	ha->max_req_queues = ha->max_rsp_queues = 0;
2307

L
Linus Torvalds 已提交
2308
probe_failed:
2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

2319
	qla2x00_free_device(base_vha);
L
Linus Torvalds 已提交
2320

2321
	scsi_host_put(base_vha->host);
L
Linus Torvalds 已提交
2322

2323
probe_hw_failed:
2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334
	if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
		iounmap((device_reg_t __iomem *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
	}
2335 2336 2337
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);
	ha = NULL;
L
Linus Torvalds 已提交
2338

2339
probe_out:
2340
	pci_disable_device(pdev);
2341
	return ret;
L
Linus Torvalds 已提交
2342 2343
}

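/*
 * qla2x00_remove_one() -- PCI remove entry point: tears down vports, the
 * DPC thread and work queue, unregisters the SCSI host and releases all
 * hardware resources.
 */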
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha, *vha;
	struct qla_hw_data  *ha;
	unsigned long flags;

	base_vha = pci_get_drvdata(pdev);
	ha = base_vha->hw;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		atomic_inc(&vha->vref_count);

		if (vha && vha->fc_vport) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			fc_vport_terminate(vha->fc_vport);

			spin_lock_irqsave(&ha->vport_slock, flags);
		}

		atomic_dec(&vha->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	set_bit(UNLOADING, &base_vha->dpc_flags);

	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	/* Disable timer */
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	/* Flush the work queue and remove it */
	if (ha->wq) {
		flush_workqueue(ha->wq);
		destroy_workqueue(ha->wq);
		ha->wq = NULL;
	}

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		/*
		 * qla2xxx_wake_dpc checks for ->dpc_thread
		 * so we need to zero it out.
		 */
		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_sysfs_attr(base_vha);

	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	qla2x00_free_device(base_vha);

	scsi_host_put(base_vha->host);

	if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);

		iounmap((device_reg_t __iomem *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);

		if (ha->mqiobase)
			iounmap(ha->mqiobase);
	}

	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);
	ha = NULL;

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

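/*
 * qla2x00_free_device() -- stop the timer, DPC thread and firmware traces,
 * disable interrupts and release the per-adapter queues and memory.
 */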
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		/*
		 * qla2xxx_wake_dpc checks for ->dpc_thread
		 * so we need to zero it out.
		 */
		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla25xx_delete_queues(vha);

	if (ha->flags.fce_enabled)
		qla2x00_disable_fce_trace(vha, NULL, NULL);

	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fcports(vha);

	qla2x00_mem_free(ha);

	qla2x00_free_queues(ha);
}

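/* Free every fc_port hanging off the vport's fcport list. */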
void qla2x00_free_fcports(struct scsi_qla_host *vha)
{
	fc_port_t *fcport, *tfcport;

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
		fcport = NULL;
	}
}

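/*
 * Remove the fc_rport either immediately or, when 'defer' is set, by
 * handing it to the base vport's DPC thread via FCPORT_UPDATE_NEEDED.
 */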
static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
    int defer)
{
	struct fc_rport *rport;
	scsi_qla_host_t *base_vha;

	if (!fcport->rport)
		return;

	rport = fcport->rport;
	if (defer) {
		base_vha = pci_get_drvdata(vha->hw->pdev);
		spin_lock_irq(vha->host->host_lock);
		fcport->drport = rport;
		spin_unlock_irq(vha->host->host_lock);
		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
	} else
		fc_remote_port_delete(rport);
}

/*
 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
 *
 * Input: ha = adapter block pointer.  fcport = port structure pointer.
 *
 * Return: None.
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
    int do_login, int defer)
{
	if (atomic_read(&fcport->state) == FCS_ONLINE &&
	    vha->vp_idx == fcport->vp_idx) {
		atomic_set(&fcport->state, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
	}
	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		atomic_set(&fcport->state, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	if (fcport->login_retry == 0) {
		fcport->login_retry = vha->hw->login_retry_count;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

		DEBUG(printk("scsi(%ld): Port login retry: "
		    "%02x%02x%02x%02x%02x%02x%02x%02x, "
		    "id = 0x%04x retry cnt=%d\n",
		    vha->host_no,
		    fcport->port_name[0],
		    fcport->port_name[1],
		    fcport->port_name[2],
		    fcport->port_name[3],
		    fcport->port_name[4],
		    fcport->port_name[5],
		    fcport->port_name[6],
		    fcport->port_name[7],
		    fcport->loop_id,
		    fcport->login_retry));
	}
}

/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
			continue;

		/*
		 * No point in marking the device as lost, if the device is
		 * already DEAD.
		 */
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
			continue;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			if (defer)
				qla2x00_schedule_rport_del(vha, fcport, defer);
			else if (vha->vp_idx == fcport->vp_idx)
				qla2x00_schedule_rport_del(vha, fcport, defer);
		}
		atomic_set(&fcport->state, FCS_DEVICE_LOST);
	}
}

/*
* qla2x00_mem_alloc
*      Allocates adapter memory.
*
* Returns:
*      1  = success.
*      -ENOMEM  = failure (a memory allocation failed).
*/
2617
static int
2618 2619
qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
	struct req_que **req, struct rsp_que **rsp)
{
	char	name[16];

2623
	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2624
		&ha->init_cb_dma, GFP_KERNEL);
2625
	if (!ha->init_cb)
2626
		goto fail;
2627

2628 2629 2630
	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
		&ha->gid_list_dma, GFP_KERNEL);
	if (!ha->gid_list)
2631
		goto fail_free_init_cb;

2633 2634
	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
	if (!ha->srb_mempool)
2635
		goto fail_free_gid_list;
2636

2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651
	if (IS_QLA82XX(ha)) {
		/* Allocate cache for CT6 Ctx. */
		if (!ctx_cachep) {
			ctx_cachep = kmem_cache_create("qla2xxx_ctx",
				sizeof(struct ct6_dsd), 0,
				SLAB_HWCACHE_ALIGN, NULL);
			if (!ctx_cachep)
				goto fail_free_gid_list;
		}
		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
			ctx_cachep);
		if (!ha->ctx_mempool)
			goto fail_free_srb_mempool;
	}

2652 2653 2654
	/* Get memory for cached NVRAM */
	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
	if (!ha->nvram)
2655
		goto fail_free_ctx_mempool;
2656

2657 2658 2659 2660 2661 2662 2663
	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
		ha->pdev->device);
	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		DMA_POOL_SIZE, 8, 0);
	if (!ha->s_dma_pool)
		goto fail_free_nvram;

2664
	if (IS_QLA82XX(ha) || ql2xenabledif) {
2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681
		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
			DSD_LIST_DMA_POOL_SIZE, 8, 0);
		if (!ha->dl_dma_pool) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - dl_dma_pool\n");
			goto fail_s_dma_pool;
		}

		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
			FCP_CMND_DMA_POOL_SIZE, 8, 0);
		if (!ha->fcp_cmnd_dma_pool) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - fcp_cmnd_dma_pool\n");
			goto fail_dl_dma_pool;
		}
	}

2682 2683
	/* Allocate memory for SNS commands */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2684
	/* Get consistent memory allocated for SNS commands */
2685
		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2686
		sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2687
		if (!ha->sns_cmd)
2688
			goto fail_dma_pool;
2689
	} else {
2690
	/* Get consistent memory allocated for MS IOCB */
2691
		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2692
			&ha->ms_iocb_dma);
2693
		if (!ha->ms_iocb)
2694 2695
			goto fail_dma_pool;
	/* Get consistent memory allocated for CT SNS commands */
2696
		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2697
			sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2698 2699
		if (!ha->ct_sns)
			goto fail_free_ms_iocb;
	}

2702
	/* Allocate memory for request ring */
2703 2704
	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (!*req) {
2705 2706 2707
		DEBUG(printk("Unable to allocate memory for req\n"));
		goto fail_req;
	}
2708 2709 2710 2711 2712
	(*req)->length = req_len;
	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
		((*req)->length + 1) * sizeof(request_t),
		&(*req)->dma, GFP_KERNEL);
	if (!(*req)->ring) {
2713 2714 2715 2716
		DEBUG(printk("Unable to allocate memory for req_ring\n"));
		goto fail_req_ring;
	}
	/* Allocate memory for response ring */
2717 2718 2719 2720
	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (!*rsp) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for rsp\n");
2721 2722
		goto fail_rsp;
	}
2723 2724 2725 2726 2727 2728 2729 2730
	(*rsp)->hw = ha;
	(*rsp)->length = rsp_len;
	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
		((*rsp)->length + 1) * sizeof(response_t),
		&(*rsp)->dma, GFP_KERNEL);
	if (!(*rsp)->ring) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for rsp_ring\n");
2731 2732
		goto fail_rsp_ring;
	}
2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
	(*req)->rsp = *rsp;
	(*rsp)->req = *req;
	/* Allocate memory for NVRAM data for vports */
	if (ha->nvram_npiv_size) {
		ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
					ha->nvram_npiv_size, GFP_KERNEL);
		if (!ha->npiv_info) {
			qla_printk(KERN_WARNING, ha,
				"Unable to allocate memory for npiv info\n");
			goto fail_npiv_info;
		}
	} else
		ha->npiv_info = NULL;
2746

2747
	/* Get consistent memory allocated for EX-INIT-CB. */
2748
	if (IS_QLA8XXX_TYPE(ha)) {
2749 2750 2751 2752 2753 2754
		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->ex_init_cb_dma);
		if (!ha->ex_init_cb)
			goto fail_ex_init_cb;
	}

2755 2756
	INIT_LIST_HEAD(&ha->gbl_dsd_list);

2757 2758 2759 2760 2761 2762 2763 2764
	/* Get consistent memory allocated for Async Port-Database. */
	if (!IS_FWI2_CAPABLE(ha)) {
		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
			&ha->async_pd_dma);
		if (!ha->async_pd)
			goto fail_async_pd;
	}

2765 2766 2767
	INIT_LIST_HEAD(&ha->vp_list);
	return 1;

2768 2769
fail_async_pd:
	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2770 2771
fail_ex_init_cb:
	kfree(ha->npiv_info);
2772 2773 2774 2775 2776
fail_npiv_info:
	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
	(*rsp)->ring = NULL;
	(*rsp)->dma = 0;
2777
fail_rsp_ring:
2778
	kfree(*rsp);
2779
fail_rsp:
2780 2781 2782 2783
	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
		sizeof(request_t), (*req)->ring, (*req)->dma);
	(*req)->ring = NULL;
	(*req)->dma = 0;
2784
fail_req_ring:
2785
	kfree(*req);
2786 2787 2788 2789 2790
fail_req:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		ha->ct_sns, ha->ct_sns_dma);
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
2791 2792 2793 2794
fail_free_ms_iocb:
	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
2795
fail_dma_pool:
2796
	if (IS_QLA82XX(ha) || ql2xenabledif) {
2797 2798 2799 2800
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
		ha->fcp_cmnd_dma_pool = NULL;
	}
fail_dl_dma_pool:
2801
	if (IS_QLA82XX(ha) || ql2xenabledif) {
2802 2803 2804 2805
		dma_pool_destroy(ha->dl_dma_pool);
		ha->dl_dma_pool = NULL;
	}
fail_s_dma_pool:
2806 2807
	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;
2808 2809 2810
fail_free_nvram:
	kfree(ha->nvram);
	ha->nvram = NULL;
2811 2812 2813
fail_free_ctx_mempool:
	mempool_destroy(ha->ctx_mempool);
	ha->ctx_mempool = NULL;
2814 2815 2816 2817 2818
fail_free_srb_mempool:
	mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;
fail_free_gid_list:
	dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2819
	ha->gid_list_dma);
2820 2821
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;
2822 2823 2824 2825 2826
fail_free_init_cb:
	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
	ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
2827
fail:
2828
	DEBUG(printk("%s: Memory allocation failure\n", __func__));
2829
	return -ENOMEM;
}

/*
* qla2x00_mem_free
*      Frees all adapter allocated memory.
*
* Input:
*      ha = adapter block pointer.
*/
static void
qla2x00_mem_free(struct qla_hw_data *ha)
{
2842 2843
	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);

2845 2846
	if (ha->fce)
		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2847
		ha->fce_dma);
2848

2849 2850 2851
	if (ha->fw_dump) {
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
2852
			ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2853 2854 2855
		vfree(ha->fw_dump);
	}

2856 2857 2858 2859
	if (ha->dcbx_tlv)
		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
		    ha->dcbx_tlv, ha->dcbx_tlv_dma);

2860 2861 2862 2863
	if (ha->xgmac_data)
		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
		    ha->xgmac_data, ha->xgmac_data_dma);

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2866
		ha->sns_cmd, ha->sns_cmd_dma);

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2870
		ha->ct_sns, ha->ct_sns_dma);

2872 2873 2874
	if (ha->sfp_data)
		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);

2875 2876 2877
	if (ha->edc_data)
		dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

2881
	if (ha->ex_init_cb)
2882 2883
		dma_pool_free(ha->s_dma_pool,
			ha->ex_init_cb, ha->ex_init_cb_dma);
2884

2885 2886 2887
	if (ha->async_pd)
		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);

	if (ha->s_dma_pool)
		dma_pool_destroy(ha->s_dma_pool);

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2893
		ha->gid_list_dma);

2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918
	if (IS_QLA82XX(ha)) {
		if (!list_empty(&ha->gbl_dsd_list)) {
			struct dsd_dma *dsd_ptr, *tdsd_ptr;

			/* clean up allocated prev pool */
			list_for_each_entry_safe(dsd_ptr,
				tdsd_ptr, &ha->gbl_dsd_list, list) {
				dma_pool_free(ha->dl_dma_pool,
				dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
				list_del(&dsd_ptr->list);
				kfree(dsd_ptr);
			}
		}
	}

	if (ha->dl_dma_pool)
		dma_pool_destroy(ha->dl_dma_pool);

	if (ha->fcp_cmnd_dma_pool)
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);

	if (ha->ctx_mempool)
		mempool_destroy(ha->ctx_mempool);

2919 2920
	if (ha->init_cb)
		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2921
			ha->init_cb, ha->init_cb_dma);
2922 2923
	vfree(ha->optrom_buffer);
	kfree(ha->nvram);
2924
	kfree(ha->npiv_info);

2926
	ha->srb_mempool = NULL;
2927
	ha->ctx_mempool = NULL;
2928 2929
	ha->eft = NULL;
	ha->eft_dma = 0;
	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
2938 2939
	ha->ex_init_cb = NULL;
	ha->ex_init_cb_dma = 0;
2940 2941
	ha->async_pd = NULL;
	ha->async_pd_dma = 0;

	ha->s_dma_pool = NULL;
2944 2945
	ha->dl_dma_pool = NULL;
	ha->fcp_cmnd_dma_pool = NULL;

	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

2950 2951 2952 2953
	ha->fw_dump = NULL;
	ha->fw_dumped = 0;
	ha->fw_dump_reading = 0;
}

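/*
 * Allocate a Scsi_Host with a scsi_qla_host_t private area and initialize
 * the vport fcport list, work list and host string used by this driver.
 */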
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
						struct qla_hw_data *ha)
{
	struct Scsi_Host *host;
	struct scsi_qla_host *vha = NULL;
2960

2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979
	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
	if (host == NULL) {
		printk(KERN_WARNING
		"qla2xxx: Couldn't allocate host from scsi layer!\n");
		goto fail;
	}

	/* Clear our data area */
	vha = shost_priv(host);
	memset(vha, 0, sizeof(scsi_qla_host_t));

	vha->host = host;
	vha->host_no = host->host_no;
	vha->hw = ha;

	INIT_LIST_HEAD(&vha->vp_fcports);
	INIT_LIST_HEAD(&vha->work_list);
	INIT_LIST_HEAD(&vha->list);

2980 2981
	spin_lock_init(&vha->work_lock);

2982 2983 2984 2985 2986
	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
	return vha;

fail:
	return vha;
}

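/*
 * DPC work-list helpers: qla2x00_alloc_work() builds a qla_work_evt while
 * marking the vport busy; qla2x00_post_work() queues it and wakes the DPC
 * thread, which later drains the list in qla2x00_do_work().
 */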
static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2991 2992
{
	struct qla_work_evt *e;
2993 2994 2995 2996 2997
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (bail)
		return NULL;
2998

2999
	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
3000 3001
	if (!e) {
		QLA_VHA_MARK_NOT_BUSY(vha);
3002
		return NULL;
3003
	}
3004 3005 3006 3007 3008 3009 3010

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}

3011
static int
3012
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
3013
{
3014
	unsigned long flags;
3015

3016
	spin_lock_irqsave(&vha->work_lock, flags);
3017
	list_add_tail(&e->list, &vha->work_list);
3018
	spin_unlock_irqrestore(&vha->work_lock, flags);
3019
	qla2xxx_wake_dpc(vha);
3020

3021 3022 3023 3024
	return QLA_SUCCESS;
}

int
3025
qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
3026 3027 3028 3029
    u32 data)
{
	struct qla_work_evt *e;

3030
	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
3031 3032 3033 3034 3035
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
3036
	return qla2x00_post_work(vha, e);
3037 3038
}

3039 3040 3041 3042 3043
int
qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
{
	struct qla_work_evt *e;

3044
	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
3045 3046 3047 3048
	if (!e)
		return QLA_FUNCTION_FAILED;

	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3049
	return qla2x00_post_work(vha, e);
3050 3051
}

3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074
#define qla2x00_post_async_work(name, type)	\
int qla2x00_post_async_##name##_work(		\
    struct scsi_qla_host *vha,			\
    fc_port_t *fcport, uint16_t *data)		\
{						\
	struct qla_work_evt *e;			\
						\
	e = qla2x00_alloc_work(vha, type);	\
	if (!e)					\
		return QLA_FUNCTION_FAILED;	\
						\
	e->u.logio.fcport = fcport;		\
	if (data) {				\
		e->u.logio.data[0] = data[0];	\
		e->u.logio.data[1] = data[1];	\
	}					\
	return qla2x00_post_work(vha, e);	\
}

qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
3075 3076
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
3077

3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.uevent.code = code;
	return qla2x00_post_work(vha, e);
}

static void
qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
{
	char event_string[40];
	char *envp[] = { event_string, NULL };

	switch (code) {
	case QLA_UEVENT_CODE_FW_DUMP:
		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
		    vha->host_no);
		break;
	default:
		/* do nothing */
		break;
	}
	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
}

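/* Drain the vport work list populated by qla2x00_post_work(). */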
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
	struct qla_work_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);
3115

3116 3117 3118 3119 3120
	spin_lock_irqsave(&vha->work_lock, flags);
	list_splice_init(&vha->work_list, &work);
	spin_unlock_irqrestore(&vha->work_lock, flags);

	list_for_each_entry_safe(e, tmp, &work, list) {
3121 3122 3123 3124
		list_del_init(&e->list);

		switch (e->type) {
		case QLA_EVT_AEN:
3125
			fc_host_post_event(vha->host, fc_get_event_number(),
3126 3127
			    e->u.aen.code, e->u.aen.data);
			break;
3128 3129 3130
		case QLA_EVT_IDC_ACK:
			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
			break;
3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145
		case QLA_EVT_ASYNC_LOGIN:
			qla2x00_async_login(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_LOGIN_DONE:
			qla2x00_async_login_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_LOGOUT:
			qla2x00_async_logout(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_LOGOUT_DONE:
			qla2x00_async_logout_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
3146 3147 3148 3149 3150 3151 3152 3153
		case QLA_EVT_ASYNC_ADISC:
			qla2x00_async_adisc(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_ADISC_DONE:
			qla2x00_async_adisc_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
3154 3155 3156
		case QLA_EVT_UEVENT:
			qla2x00_uevent_emit(vha, e->u.uevent.code);
			break;
3157 3158 3159
		}
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);
3160 3161 3162

		/* For each work completed decrement vha ref count */
		QLA_VHA_MARK_NOT_BUSY(vha);
3163 3164
	}
}
3165

3166 3167 3168 3169 3170 3171
/* Relogins all the fcports of a vport
 * Context: dpc thread
 */
void qla2x00_relogin(struct scsi_qla_host *vha)
{
	fc_port_t       *fcport;
3172
	int status;
3173 3174
	uint16_t        next_loopid = 0;
	struct qla_hw_data *ha = vha->hw;
3175
	uint16_t data[2];
3176 3177 3178 3179 3180 3181

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
	/*
	 * If the port is not ONLINE then try to login
	 * to it if we haven't run out of retries.
	 */
3182 3183
		if (atomic_read(&fcport->state) != FCS_ONLINE &&
		    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
3184
			fcport->login_retry--;
3185
			if (fcport->flags & FCF_FABRIC_DEVICE) {
3186
				if (fcport->flags & FCF_FCP2_DEVICE)
3187 3188 3189 3190 3191 3192
					ha->isp_ops->fabric_logout(vha,
							fcport->loop_id,
							fcport->d_id.b.domain,
							fcport->d_id.b.area,
							fcport->d_id.b.al_pa);

3193
				if (IS_ALOGIO_CAPABLE(ha)) {
3194
					fcport->flags |= FCF_ASYNC_SENT;
3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205
					data[0] = 0;
					data[1] = QLA_LOGIO_LOGIN_RETRIED;
					status = qla2x00_post_async_login_work(
					    vha, fcport, data);
					if (status == QLA_SUCCESS)
						continue;
					/* Attempt a retry. */
					status = 1;
				} else
					status = qla2x00_fabric_login(vha,
					    fcport, &next_loopid);
3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233
			} else
				status = qla2x00_local_device_login(vha,
								fcport);

			if (status == QLA_SUCCESS) {
				fcport->old_loop_id = fcport->loop_id;

				DEBUG(printk("scsi(%ld): port login OK: logged "
				"in ID 0x%x\n", vha->host_no, fcport->loop_id));

				qla2x00_update_fcport(vha, fcport);

			} else if (status == 1) {
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				/* retry the login again */
				DEBUG(printk("scsi(%ld): Retrying"
				" %d login again loop_id 0x%x\n",
				vha->host_no, fcport->login_retry,
						fcport->loop_id));
			} else {
				fcport->login_retry = 0;
			}

			if (fcport->login_retry == 0 && status != QLA_SUCCESS)
				fcport->loop_id = FC_NO_LOOP_ID;
		}
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;
3234 3235 3236
	}
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code and starts up
* one per adapter. It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event it will set one of the task
* bits and then wake us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
3253
	int		rval;
3254 3255
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;

3257 3258
	ha = (struct qla_hw_data *)data;
	base_vha = pci_get_drvdata(ha->pdev);

	set_user_nice(current, -20);

3262
	while (!kthread_should_stop()) {
		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));

3265 3266 3267
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);

		DEBUG3(printk("qla2x00: DPC handler waking up\n"));

		/* Initialization not yet finished. Don't do anything yet. */
3272
		if (!base_vha->flags.init_done)
L
Linus Torvalds 已提交
3273 3274
			continue;

3275 3276 3277 3278 3279 3280 3281
		if (ha->flags.eeh_busy) {
			DEBUG17(qla_printk(KERN_WARNING, ha,
			    "qla2x00_do_dpc: dpc_flags: %lx\n",
			    base_vha->dpc_flags));
			continue;
		}

3282
		DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));

		ha->dpc_active = 1;

		if (ha->flags.mbox_busy) {
			ha->dpc_active = 0;
			continue;
		}

3291
		qla2x00_do_work(base_vha);
3292

3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331
		if (IS_QLA82XX(ha)) {
			if (test_and_clear_bit(ISP_UNRECOVERABLE,
				&base_vha->dpc_flags)) {
				qla82xx_idc_lock(ha);
				qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_FAILED);
				qla82xx_idc_unlock(ha);
				qla_printk(KERN_INFO, ha,
					"HW State: FAILED\n");
				qla82xx_device_state_handler(base_vha);
				continue;
			}

			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
				&base_vha->dpc_flags)) {

				DEBUG(printk(KERN_INFO
					"scsi(%ld): dpc: sched "
					"qla82xx_fcoe_ctx_reset ha = %p\n",
					base_vha->host_no, ha));
				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
					&base_vha->dpc_flags))) {
					if (qla82xx_fcoe_ctx_reset(base_vha)) {
						/* FCoE-ctx reset failed.
						 * Escalate to chip-reset
						 */
						set_bit(ISP_ABORT_NEEDED,
							&base_vha->dpc_flags);
					}
					clear_bit(ABORT_ISP_ACTIVE,
						&base_vha->dpc_flags);
				}

				DEBUG(printk("scsi(%ld): dpc:"
					" qla82xx_fcoe_ctx_reset end\n",
					base_vha->host_no));
			}
		}

3332 3333
		if (test_and_clear_bit(ISP_ABORT_NEEDED,
						&base_vha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): dpc: sched "
			    "qla2x00_abort_isp ha = %p\n",
3337
			    base_vha->host_no, ha));
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3339
			    &base_vha->dpc_flags))) {

3341
				if (ha->isp_ops->abort_isp(base_vha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
3344
					    &base_vha->dpc_flags);
3345
				}
3346 3347
				clear_bit(ABORT_ISP_ACTIVE,
						&base_vha->dpc_flags);
3348 3349
			}

			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
3351
			    base_vha->host_no));
		}

3354 3355 3356
		if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
			qla2x00_update_fcports(base_vha);
			clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3357
		}
3358

3359 3360 3361
		if (test_and_clear_bit(RESET_MARKER_NEEDED,
							&base_vha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {

			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
3364
			    base_vha->host_no));

3366 3367
			qla2x00_rst_aen(base_vha);
			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
		}

		/* Retry each device up to login retry count */
3371 3372 3373 3374
		if ((test_and_clear_bit(RELOGIN_NEEDED,
						&base_vha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
3377 3378 3379
					base_vha->host_no));
			qla2x00_relogin(base_vha);

			DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
3381
			    base_vha->host_no));
		}

3384 3385
		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
							&base_vha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
3388
				base_vha->host_no));

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3391
			    &base_vha->dpc_flags))) {

3393
				rval = qla2x00_loop_resync(base_vha);

3395 3396
				clear_bit(LOOP_RESYNC_ACTIVE,
						&base_vha->dpc_flags);
			}

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
3400
			    base_vha->host_no));
		}

3403 3404 3405 3406
		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
3407 3408
		}

		if (!ha->interrupts_on)
3410
			ha->isp_ops->enable_intrs(ha);

3412 3413 3414
		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
					&base_vha->dpc_flags))
			ha->isp_ops->beacon_blink(base_vha);
3415

3416
		qla2x00_do_dpc_all_vps(base_vha);
3417

		ha->dpc_active = 0;
	} /* End of while(1) */

3421
	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

3428 3429 3430
	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

3431 3432 3433 3434
	return 0;
}

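/* Wake the DPC thread unless the driver is unloading. */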
void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
3437
	struct qla_hw_data *ha = vha->hw;
3438 3439
	struct task_struct *t = ha->dpc_thread;

3440
	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
3441
		wake_up_process(t);
}

/*
*  qla2x00_rst_aen
*      Processes asynchronous reset.
*
* Input:
*      ha  = adapter block pointer.
*/
static void
3452
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
3454 3455 3456
	if (vha->flags.online && !vha->flags.reset_active &&
	    !atomic_read(&vha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
			 */
			vha->marker_needed = 1;
		} while (!atomic_read(&vha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
	}
}

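/*
*  qla2x00_sp_free_dma
*      Release the DMA mappings and any DIF/CRC context resources still
*      attached to an SRB before the command is completed.
*/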
static void
qla2x00_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The DSD list is guaranteed to be non-empty here. */
		qla2x00_clean_dsd_pool(ha, sp);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, sp->ctx,
		    ((struct crc_context *)sp->ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	CMD_SP(cmd) = NULL;
}

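/*
*  qla2x00_sp_final_compl
*      Free the SRB's DMA resources and FCP command context, return the
*      SRB to its mempool and complete the SCSI command.
*/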
static void
qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	qla2x00_sp_free_dma(sp);

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx = sp->ctx;
		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
			ctx->fcp_cmnd_dma);
		list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx->dsd_use_cnt;
		mempool_free(sp->ctx, ha->ctx_mempool);
		sp->ctx = NULL;
	}

	mempool_free(sp, ha->srb_mempool);
	cmd->scsi_done(cmd);
}

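/*
*  qla2x00_sp_compl
*      Reference-counted SRB completion: each caller drops one reference
*      and only the final drop runs qla2x00_sp_final_compl(), so the
*      normal completion and abort/cleanup paths cannot free the SRB
*      twice.
*/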
void
qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
	if (atomic_read(&sp->ref_count) == 0) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "SP reference-count to ZERO -- sp=%p\n", sp));
		DEBUG2(BUG());
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;
	qla2x00_sp_final_compl(ha, sp);
}

/**************************************************************************
*   qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
void
qla2x00_timer(scsi_qla_host_t *vha)
{
	unsigned long	cpu_flags = 0;
	int		start_dpc = 0;
	int		index;
	srb_t		*sp;
	uint16_t        w;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	if (ha->flags.eeh_busy) {
		qla2x00_restart_timer(vha, WATCH_INTERVAL);
		return;
	}

	if (IS_QLA82XX(ha))
		qla82xx_watchdog(vha);

	/* Hardware read to raise pending EEH errors during mailbox waits. */
	if (!pci_channel_offline(ha->pdev))
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);

	/* Loop down handler. */
	if (atomic_read(&vha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		&& vha->flags.online) {

		if (atomic_read(&vha->loop_down_timer) ==
		    vha->loop_down_abort_time) {

			DEBUG(printk("scsi(%ld): Loop Down - aborting the "
			    "queues before time expire\n",
			    vha->host_no));

			if (!IS_QLA2100(ha) && vha->link_down_timeout)
				atomic_set(&vha->loop_state, LOOP_DEAD);

			/*
			 * Schedule an ISP abort to return any FCP2-device
			 * commands.
			 */
			/* NPIV - scan physical port only */
			if (!vha->vp_idx) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				req = ha->req_q_map[0];
				for (index = 1;
				    index < MAX_OUTSTANDING_COMMANDS;
				    index++) {
					fc_port_t *sfcp;

					sp = req->outstanding_cmds[index];
					if (!sp)
						continue;
					if (sp->ctx && !IS_PROT_IO(sp))
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_FCP2_DEVICE))
						continue;

					set_bit(ISP_ABORT_NEEDED,
							&vha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
								cpu_flags);
			}
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
			if (!(vha->device_flags & DFLG_NO_CABLE)) {
				DEBUG(printk("scsi(%ld): Loop down - "
				    "aborting ISP.\n",
				    vha->host_no));
				qla_printk(KERN_WARNING, ha,
				    "Loop down - aborting ISP.\n");

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		}
		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
		    vha->host_no,
		    atomic_read(&vha->loop_down_timer)));
	}

	/* Check if beacon LED needs to be blinked */
	if (ha->beacon_blink_led == 1) {
		set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
		start_dpc++;
	}

	/* Process any deferred work. */
	if (!list_empty(&vha->work_list))
		start_dpc++;

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
		qla2xxx_wake_dpc(vha);

	qla2x00_restart_timer(vha, WATCH_INTERVAL);
}

/* Firmware interface routines. */

#define FW_BLOBS	8
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5
#define FW_ISP81XX	6
#define FW_ISP82XX	7

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"
#define FW_FILE_ISP81XX	"ql8100_fw.bin"
#define FW_FILE_ISP82XX	"ql8200_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
};

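/*
*  qla2x00_request_firmware
*      Map the ISP type to its firmware blob and load the image with
*      request_firmware() on first use.  Loaded images are cached in
*      qla_fw_blobs[] under qla_fw_lock and released at module exit.
*/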
struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct fw_blob *blob;

	blob = NULL;
	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	} else if (IS_QLA81XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP81XX];
	} else if (IS_QLA82XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP82XX];
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
		    "(%s).\n", vha->host_no, blob->name));
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}

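/* Release every firmware image cached by qla2x00_request_firmware(). */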
static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		if (qla_fw_blobs[idx].fw)
			release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}

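/*
*  PCI error recovery (EEH/AER) callbacks.
*
*  qla2xxx_pci_error_detected
*      Invoked when a PCI channel error is reported; marks the adapter
*      EEH-busy, fails outstanding commands and tells the PCI core
*      whether a slot reset is required.
*/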
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = vha->hw;

	DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
	    state));

	switch (state) {
	case pci_channel_io_normal:
		ha->flags.eeh_busy = 0;
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		ha->flags.eeh_busy = 1;
		/* For ISP82XX complete any pending mailbox cmd */
		if (IS_QLA82XX(ha)) {
			ha->flags.fw_hung = 1;
			if (ha->flags.mbox_busy) {
				ha->flags.mbox_int = 1;
				DEBUG2(qla_printk(KERN_ERR, ha,
					"Due to pci channel io frozen, doing premature "
					"completion of mbx command\n"));
				complete(&ha->mbx_intr_comp);
			}
		}
		qla2x00_free_irqs(vha);
		pci_disable_device(pdev);
		/* Return all outstanding I/Os. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ha->flags.pci_channel_io_perm_failure = 1;
		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

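/*
*  qla2xxx_pci_mmio_enabled
*      Called once MMIO access is restored.  If the RISC is paused, a
*      firmware dump is captured and a slot reset is requested.
*/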
static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
		    "Dumping firmware!\n");
		ha->isp_ops->fw_dump(base_vha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}

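/*
*  qla82xx_error_recovery
*      ISP82xx-specific slot-reset recovery.  The lowest enabled PCI
*      function acts as the reset owner and brings the firmware back up;
*      the other functions simply restart their ISP once the device
*      state reports READY.
*/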
uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
	uint32_t rval = QLA_FUNCTION_FAILED;
	uint32_t drv_active = 0;
	struct qla_hw_data *ha = base_vha->hw;
	int fn;
	struct pci_dev *other_pdev = NULL;

	DEBUG17(qla_printk(KERN_INFO, ha,
	    "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (base_vha->flags.online) {
		/* Abort all outstanding commands so they can be
		 * requeued later. */
		qla2x00_abort_isp_cleanup(base_vha);
	}


	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		DEBUG17(qla_printk(KERN_INFO, ha,
		    "Finding pci device at function = 0x%x\n", fn));
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;
		if (atomic_read(&other_pdev->enable_cnt)) {
			DEBUG17(qla_printk(KERN_INFO, ha,
			    "Found PCI func available and enabled at 0x%x\n",
			    fn));
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	if (!fn) {
		/* Reset owner */
		DEBUG17(qla_printk(KERN_INFO, ha,
		    "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
		qla82xx_idc_lock(ha);

		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_INITIALIZING);

		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
		    QLA82XX_IDC_VERSION);

		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
		DEBUG17(qla_printk(KERN_INFO, ha,
		    "drv_active = 0x%x\n", drv_active));

		qla82xx_idc_unlock(ha);
		/* Reset if device is not already reset
		 * drv_active would be 0 if a reset has already been done
		 */
		if (drv_active)
			rval = qla82xx_start_firmware(base_vha);
		else
			rval = QLA_SUCCESS;
		qla82xx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
			qla82xx_clear_drv_active(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_FAILED);
		} else {
			qla_printk(KERN_INFO, ha, "HW State: READY\n");
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_READY);
			qla82xx_idc_unlock(ha);
			ha->flags.fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			/* Clear driver state register */
			qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla82xx_set_drv_active(base_vha);
		}
		qla82xx_idc_unlock(ha);
	} else {
		DEBUG17(qla_printk(KERN_INFO, ha,
		    "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
		if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		    QLA82XX_DEV_READY)) {
			ha->flags.fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			qla82xx_set_drv_active(base_vha);
			qla82xx_idc_unlock(ha);
		}
	}
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	return rval;
}

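/*
*  qla2xxx_pci_slot_reset
*      Re-enable the device after the PCI core has reset the slot,
*      reattach interrupts and abort/reinitialize the ISP.  Returns
*      PCI_ERS_RESULT_RECOVERED only if that reinitialization succeeds.
*/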
static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct rsp_que *rsp;
	int rc, retries = 10;

	DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));

	/* Workaround: the qla2xxx driver accesses hardware early in the
	 * recovery path and needs the error state to be
	 * pci_channel_io_normal, otherwise mailbox commands time out.
	 */
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);

	/* pci_restore_state() clears the device's saved_state flag, so
	 * save the state again to keep a valid saved state for any
	 * later restore.
	 */
	pci_save_state(pdev);

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		qla_printk(KERN_WARNING, ha,
		    "Can't re-enable PCI device after reset.\n");
		goto exit_slot_reset;
	}

	rsp = ha->rsp_q_map[0];
	if (qla2x00_request_irqs(ha, rsp))
		goto exit_slot_reset;

	if (ha->isp_ops->pci_config(base_vha))
		goto exit_slot_reset;

	if (IS_QLA82XX(ha)) {
		if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

	while (ha->flags.mbox_busy && retries--)
		msleep(1000);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);


exit_slot_reset:
	DEBUG17(qla_printk(KERN_WARNING, ha,
	    "slot_reset-return:ret=%x\n", ret));

	return ret;
}

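/*
*  qla2xxx_pci_resume
*      Final step of PCI error recovery: wait for the HBA to come back
*      online, clear any uncorrectable AER status and drop the EEH-busy
*      flag so normal I/O can resume.
*/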
static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int ret;

	DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));

	ret = qla2x00_wait_for_hba_online(base_vha);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_ERR, ha,
		    "the device failed to resume I/O "
		    "from slot/link_reset");
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	ha->flags.eeh_busy = 0;
}

static struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.err_handler	= &qla2xxx_err_handler,
};

static struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		return -ENODEV;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		printk(KERN_WARNING "qla2xxx: Unable to register char device "
		    "%s\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		return -ENODEV;
	}

	printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	if (ctx_cachep)
		kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);