qla_attr.c 60.6 KB
Newer Older
已提交
1
/*
A
Andrew Vasquez 已提交
2
 * QLogic Fibre Channel HBA Driver
3
 * Copyright (c)  2003-2014 QLogic Corporation
已提交
4
 *
A
Andrew Vasquez 已提交
5
 * See LICENSE.qla2xxx for copyright and licensing details.
已提交
6 7
 */
#include "qla_def.h"
8
#include "qla_target.h"
已提交
9

10
#include <linux/kthread.h>
已提交
11
#include <linux/vmalloc.h>
12
#include <linux/slab.h>
13
#include <linux/delay.h>
已提交
14

A
Adrian Bunk 已提交
15
static int qla24xx_vport_disable(struct fc_vport *, bool);
16

已提交
17 18 19
/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
20
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 22
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
已提交
23
{
24
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
已提交
25
	    struct device, kobj)));
26
	struct qla_hw_data *ha = vha->hw;
27
	int rval = 0;
已提交
28

29
	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
已提交
30 31
		return 0;

32
	if (IS_P3P_TYPE(ha)) {
33 34 35 36 37 38 39 40 41
		if (off < ha->md_template_size) {
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_tmplt_hdr, ha->md_template_size);
			return rval;
		}
		off -= ha->md_template_size;
		rval = memory_read_from_buffer(buf, count,
		    &off, ha->md_dump, ha->md_dump_size);
		return rval;
42 43 44 45
	} else if (ha->mctp_dumped && ha->mctp_dump_reading)
		return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
		    MCTP_DUMP_SIZE);
	else if (ha->fw_dump_reading)
46
		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47
					ha->fw_dump_len);
48 49
	else
		return 0;
已提交
50 51 52
}

static ssize_t
53
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
54 55
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
已提交
56
{
57
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
已提交
58
	    struct device, kobj)));
59
	struct qla_hw_data *ha = vha->hw;
已提交
60 61 62 63 64 65 66 67
	int reading;

	if (off != 0)
		return (0);

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
68 69
		if (!ha->fw_dump_reading)
			break;
已提交
70

71
		ql_log(ql_log_info, vha, 0x705d,
72
		    "Firmware dump cleared on (%ld).\n", vha->host_no);
73

74
		if (IS_P3P_TYPE(ha)) {
75 76 77
			qla82xx_md_free(vha);
			qla82xx_md_prep(vha);
		}
78 79
		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
已提交
80 81
		break;
	case 1:
82
		if (ha->fw_dumped && !ha->fw_dump_reading) {
已提交
83 84
			ha->fw_dump_reading = 1;

85
			ql_log(ql_log_info, vha, 0x705e,
86
			    "Raw firmware dump ready for read on (%ld).\n",
87
			    vha->host_no);
已提交
88 89
		}
		break;
90
	case 2:
91
		qla2x00_alloc_fw_dump(vha);
92
		break;
93
	case 3:
94 95 96 97
		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
98 99 100 101
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
102 103 104 105
		} else
			qla2x00_system_error(vha);
		break;
	case 4:
106
		if (IS_P3P_TYPE(ha)) {
107 108 109 110 111 112 113 114 115
			if (ha->md_tmplt_hdr)
				ql_dbg(ql_dbg_user, vha, 0x705b,
				    "MiniDump supported with this firmware.\n");
			else
				ql_dbg(ql_dbg_user, vha, 0x709d,
				    "MiniDump not supported with this firmware.\n");
		}
		break;
	case 5:
116
		if (IS_P3P_TYPE(ha))
117
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
118
		break;
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134
	case 6:
		if (!ha->mctp_dump_reading)
			break;
		ql_log(ql_log_info, vha, 0x70c1,
		    "MCTP dump cleared on (%ld).\n", vha->host_no);
		ha->mctp_dump_reading = 0;
		ha->mctp_dumped = 0;
		break;
	case 7:
		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
			ha->mctp_dump_reading = 1;
			ql_log(ql_log_info, vha, 0x70c2,
			    "Raw mctp dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
已提交
135
	}
136
	return count;
已提交
137 138 139 140 141 142 143 144 145 146 147 148 149
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};

static ssize_t
150
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
151 152
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
已提交
153
{
154
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
已提交
155
	    struct device, kobj)));
156
	struct qla_hw_data *ha = vha->hw;
已提交
157

158
	if (!capable(CAP_SYS_ADMIN))
已提交
159 160
		return 0;

161
	if (IS_NOCACHE_VPD_TYPE(ha))
162
		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
163
		    ha->nvram_size);
164 165
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
					ha->nvram_size);
已提交
166 167 168
}

static ssize_t
169
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
170 171
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
已提交
172
{
173
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
已提交
174
	    struct device, kobj)));
175
	struct qla_hw_data *ha = vha->hw;
已提交
176 177
	uint16_t	cnt;

178 179
	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
180
		return -EINVAL;
已提交
181 182

	/* Checksum NVRAM. */
183
	if (IS_FWI2_CAPABLE(ha)) {
184 185 186 187 188
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
189 190
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
			chksum += le32_to_cpu(*iter);
191 192 193 194 195 196 197 198 199 200 201 202 203
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}
已提交
204

205
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
206
		ql_log(ql_log_warn, vha, 0x705f,
207 208 209 210
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

已提交
211
	/* Write NVRAM. */
212 213
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
214
	    count);
已提交
215

216 217
	ql_dbg(ql_dbg_user, vha, 0x7060,
	    "Setting ISP_ABORT_NEEDED\n");
218
	/* NVRAM settings take effect immediately. */
219
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
220 221
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);
222

223
	return count;
已提交
224 225 226 227 228 229 230
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
231
	.size = 512,
已提交
232 233 234 235
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};

236
static ssize_t
237
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
238 239
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
240
{
241
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
242
	    struct device, kobj)));
243
	struct qla_hw_data *ha = vha->hw;
244
	ssize_t rval = 0;
245 246 247 248

	if (ha->optrom_state != QLA_SREADING)
		return 0;

249 250 251 252 253 254
	mutex_lock(&ha->optrom_mutex);
	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);
	mutex_unlock(&ha->optrom_mutex);

	return rval;
255 256 257
}

static ssize_t
258
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
259 260
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
261
{
262
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
263
	    struct device, kobj)));
264
	struct qla_hw_data *ha = vha->hw;
265 266 267

	if (ha->optrom_state != QLA_SWRITING)
		return -EINVAL;
268
	if (off > ha->optrom_region_size)
269
		return -ERANGE;
270 271
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;
272

273
	mutex_lock(&ha->optrom_mutex);
274
	memcpy(&ha->optrom_buffer[off], buf, count);
275
	mutex_unlock(&ha->optrom_mutex);
276 277 278 279 280 281 282 283 284

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
285
	.size = 0,
286 287 288 289 290
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};

static ssize_t
291
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
292 293
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
294
{
295
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
296
	    struct device, kobj)));
297
	struct qla_hw_data *ha = vha->hw;
298 299 300
	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;
301
	ssize_t rval = count;
302 303

	if (off)
304
		return -EINVAL;
305

306
	if (unlikely(pci_channel_offline(ha->pdev)))
307
		return -EAGAIN;
308

309 310 311
	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
312 313
		return -EINVAL;

314
	mutex_lock(&ha->optrom_mutex);
315 316 317
	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
318 319 320 321
		    ha->optrom_state != QLA_SWRITING) {
			rval =  -EINVAL;
			goto out;
		}
322
		ha->optrom_state = QLA_SWAITING;
323

324
		ql_dbg(ql_dbg_user, vha, 0x7061,
325
		    "Freeing flash region allocation -- 0x%x bytes.\n",
326
		    ha->optrom_region_size);
327

328 329 330 331
		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
332 333 334 335
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}
336

337 338 339 340
		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

341
		ha->optrom_state = QLA_SREADING;
342
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
343
		if (ha->optrom_buffer == NULL) {
344
			ql_log(ql_log_warn, vha, 0x7062,
345
			    "Unable to allocate memory for optrom retrieval "
346
			    "(%x).\n", ha->optrom_region_size);
347 348

			ha->optrom_state = QLA_SWAITING;
349 350
			rval = -ENOMEM;
			goto out;
351 352
		}

353
		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
354 355
			ql_log(ql_log_warn, vha, 0x7063,
			    "HBA not online, failing NVRAM update.\n");
356 357
			rval = -EAGAIN;
			goto out;
358 359
		}

360
		ql_dbg(ql_dbg_user, vha, 0x7064,
361
		    "Reading flash region -- 0x%x/0x%x.\n",
362
		    ha->optrom_region_start, ha->optrom_region_size);
363 364

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
365
		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
366
		    ha->optrom_region_start, ha->optrom_region_size);
367 368
		break;
	case 2:
369 370 371 372
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}
373

374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396
		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space.  Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 * 	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 * 	0x000000 -> 0x07ffff -- Boot code.
		 * 	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 * 	0x000000 -> 0x07ffff -- Boot code.
		 * 	0x080000 -> 0x0fffff -- Firmware.
		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
397 398
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
399
			valid = 1;
400
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
401 402
			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
			|| IS_QLA27XX(ha))
403
			valid = 1;
404
		if (!valid) {
405
			ql_log(ql_log_warn, vha, 0x7065,
406
			    "Invalid start region 0x%x/0x%x.\n", start, size);
407 408
			rval = -EINVAL;
			goto out;
409 410 411 412 413 414
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

415
		ha->optrom_state = QLA_SWRITING;
416
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
417
		if (ha->optrom_buffer == NULL) {
418
			ql_log(ql_log_warn, vha, 0x7066,
419
			    "Unable to allocate memory for optrom update "
420
			    "(%x)\n", ha->optrom_region_size);
421 422

			ha->optrom_state = QLA_SWAITING;
423 424
			rval = -ENOMEM;
			goto out;
425
		}
426

427
		ql_dbg(ql_dbg_user, vha, 0x7067,
428
		    "Staging flash region write -- 0x%x/0x%x.\n",
429
		    ha->optrom_region_start, ha->optrom_region_size);
430 431

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
432 433
		break;
	case 3:
434 435 436 437
		if (ha->optrom_state != QLA_SWRITING) {
			rval = -EINVAL;
			goto out;
		}
438

439
		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
440
			ql_log(ql_log_warn, vha, 0x7068,
441
			    "HBA not online, failing flash update.\n");
442 443
			rval = -EAGAIN;
			goto out;
444 445
		}

446
		ql_dbg(ql_dbg_user, vha, 0x7069,
447
		    "Writing flash region -- 0x%x/0x%x.\n",
448
		    ha->optrom_region_start, ha->optrom_region_size);
449

450
		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
451
		    ha->optrom_region_start, ha->optrom_region_size);
452
		break;
453
	default:
454
		rval = -EINVAL;
455
	}
456 457 458 459

out:
	mutex_unlock(&ha->optrom_mutex);
	return rval;
460 461 462 463 464 465 466 467 468 469 470
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};

471
static ssize_t
472
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
473 474
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
475
{
476
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
477
	    struct device, kobj)));
478
	struct qla_hw_data *ha = vha->hw;
479
	uint32_t faddr;
480

481
	if (unlikely(pci_channel_offline(ha->pdev)))
482
		return -EAGAIN;
483

484
	if (!capable(CAP_SYS_ADMIN))
485
		return -EINVAL;
486

487 488 489 490 491 492 493 494
	if (IS_NOCACHE_VPD_TYPE(ha)) {
		faddr = ha->flt_region_vpd << 2;

		if (IS_QLA27XX(ha) &&
		    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec << 2;

		ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
495
		    ha->vpd_size);
496
	}
497
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
498 499 500
}

static ssize_t
501
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
502 503
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
504
{
505
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
506
	    struct device, kobj)));
507
	struct qla_hw_data *ha = vha->hw;
508
	uint8_t *tmp_data;
509

510 511 512
	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

513 514
	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
515 516
		return 0;

517
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
518
		ql_log(ql_log_warn, vha, 0x706a,
519 520 521 522
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

523
	/* Write NVRAM. */
524 525
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
526

527 528
	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
529
		return -EINVAL;
530 531 532

	tmp_data = vmalloc(256);
	if (!tmp_data) {
533
		ql_log(ql_log_warn, vha, 0x706b,
534
		    "Unable to allocate memory for VPD information update.\n");
535
		return -ENOMEM;
536 537 538
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);
539

540 541 542 543 544 545 546 547 548 549 550 551 552
	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};

553
static ssize_t
554
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
555 556
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
557
{
558
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
559
	    struct device, kobj)));
560
	struct qla_hw_data *ha = vha->hw;
561 562 563 564 565 566
	uint16_t iter, addr, offset;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
		return 0;

567 568 569 570 571 572
	if (ha->sfp_data)
		goto do_read;

	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
	    &ha->sfp_data_dma);
	if (!ha->sfp_data) {
573
		ql_log(ql_log_warn, vha, 0x706c,
574 575 576 577 578 579
		    "Unable to allocate memory for SFP read-data.\n");
		return 0;
	}

do_read:
	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
580 581 582 583 584 585 586 587 588
	addr = 0xa0;
	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
	    iter++, offset += SFP_BLOCK_SIZE) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

589
		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
590
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
591
		if (rval != QLA_SUCCESS) {
592
			ql_log(ql_log_warn, vha, 0x706d,
593 594
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
595

596
			return -EIO;
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613
		}
		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
		buf += SFP_BLOCK_SIZE;
	}

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE * 2,
	.read = qla2x00_sysfs_read_sfp,
};

614
static ssize_t
615
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
616 617 618 619 620 621
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
622
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
623
	int type;
624
	uint32_t idc_control;
625
	uint8_t *tmp_data = NULL;
626
	if (off != 0)
627
		return -EINVAL;
628 629 630 631

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
632 633
		ql_log(ql_log_info, vha, 0x706e,
		    "Issuing ISP reset.\n");
634 635

		scsi_block_requests(vha->host);
636
		if (IS_QLA82XX(ha)) {
637
			ha->flags.isp82xx_no_md_cap = 1;
638 639 640
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
641 642 643 644 645 646 647 648 649 650 651
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			idc_control = qla8044_rd_reg(ha,
			    QLA8044_IDC_DRV_CTRL);
			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
			    (idc_control | GRACEFUL_RESET_BIT1));
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
652
		}
653 654 655 656
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
657
		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
658
			return -EPERM;
659

660 661
		ql_log(ql_log_info, vha, 0x706f,
		    "Issuing MPI reset.\n");
662

663
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686
			uint32_t idc_control;

			qla83xx_idc_lock(vha, 0);
			__qla83xx_get_idc_control(vha, &idc_control);
			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
			__qla83xx_set_idc_control(vha, idc_control);
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_NEED_RESET);
			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
			qla83xx_idc_unlock(vha, 0);
			break;
		} else {
			/* Make sure FC side is not in reset */
			qla2x00_wait_for_hba_online(vha);

			/* Issue MPI reset */
			scsi_block_requests(vha->host);
			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0x7070,
				    "MPI reset failed.\n");
			scsi_unblock_requests(vha->host);
			break;
		}
687
	case 0x2025e:
688
		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
689 690
			ql_log(ql_log_info, vha, 0x7071,
			    "FCoE ctx reset no supported.\n");
691
			return -EPERM;
692 693
		}

694 695
		ql_log(ql_log_info, vha, 0x7072,
		    "Issuing FCoE ctx reset.\n");
696 697 698 699
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_fcoe_ctx_reset(vha);
		break;
700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721
	case 0x2025f:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bc,
		    "Disabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control |= QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20260:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bd,
		    "Enabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
722 723 724 725 726 727 728 729 730 731 732 733 734
	case 0x20261:
		ql_dbg(ql_dbg_user, vha, 0x70e0,
		    "Updating cache versions without reset ");

		tmp_data = vmalloc(256);
		if (!tmp_data) {
			ql_log(ql_log_warn, vha, 0x70e1,
			    "Unable to allocate memory for VPD information update.\n");
			return -ENOMEM;
		}
		ha->isp_ops->get_flash_version(vha, tmp_data);
		vfree(tmp_data);
		break;
735 736 737 738 739 740 741 742 743 744 745 746 747
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};

748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	int type;
	int rval = 0;
	port_id_t did;

	type = simple_strtol(buf, NULL, 10);

	did.b.domain = (type & 0x00ff0000) >> 16;
	did.b.area = (type & 0x0000ff00) >> 8;
	did.b.al_pa = (type & 0x000000ff);

	ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
	    did.b.domain, did.b.area, did.b.al_pa);

	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);

	rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
	return count;
}

static struct bin_attribute sysfs_issue_logo_attr = {
	.attr = {
		.name = "issue_logo",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_issue_logo,
};

783
static ssize_t
784
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
803
		ql_log(ql_log_warn, vha, 0x7076,
804 805 806 807 808 809 810 811 812 813 814
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);
	if (rval != QLA_SUCCESS) {
815
		ql_log(ql_log_warn, vha, 0x7077,
816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count: actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

835
static ssize_t
836
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	if (ha->dcbx_tlv)
		goto do_read;

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
854
		ql_log(ql_log_warn, vha, 0x7078,
855
		    "Unable to allocate memory for DCBX TLV read-data.\n");
856
		return -ENOMEM;
857 858 859 860 861 862 863 864
	}

do_read:
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);
	if (rval != QLA_SUCCESS) {
865 866
		ql_log(ql_log_warn, vha, 0x7079,
		    "Unable to read DCBX TLV (%x).\n", rval);
867
		return -EIO;
868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};

884 885 886 887 888 889 890 891 892 893 894
static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int is4GBp_only;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
895
	{ "reset", &sysfs_reset_attr, },
896
	{ "issue_logo", &sysfs_issue_logo_attr, },
897
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
898
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
899
	{ NULL },
900 901
};

已提交
902
void
903
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
已提交
904
{
905
	struct Scsi_Host *host = vha->host;
906 907
	struct sysfs_entry *iter;
	int ret;
已提交
908

909
	for (iter = bin_file_entries; iter->name; iter++) {
910
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
911
			continue;
912 913
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
			continue;
914
		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
915
			continue;
916 917 918 919

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
920 921 922 923 924 925 926
			ql_log(ql_log_warn, vha, 0x00f3,
			    "Unable to create sysfs %s binary attribute (%d).\n",
			    iter->name, ret);
		else
			ql_dbg(ql_dbg_init, vha, 0x00f4,
			    "Successfully created sysfs %s binary attribure.\n",
			    iter->name);
927
	}
已提交
928 929 930
}

void
931
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
已提交
932
{
933
	struct Scsi_Host *host = vha->host;
934
	struct sysfs_entry *iter;
935
	struct qla_hw_data *ha = vha->hw;
936 937

	for (iter = bin_file_entries; iter->name; iter++) {
938
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
939
			continue;
940 941
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
			continue;
942
		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
943
			continue;
944 945
		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
			continue;
已提交
946

947
		sysfs_remove_bin_file(&host->shost_gendev.kobj,
948
		    iter->attr);
949
	}
950

951
	if (stop_beacon && ha->beacon_blink_led == 1)
952
		ha->isp_ops->beacon_off(vha);
已提交
953 954
}

955 956 957
/* Scsi_Host attributes. */

static ssize_t
958 959
qla2x00_drvr_version_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
960
{
961
	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
962 963 964
}

static ssize_t
965 966
qla2x00_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
967
{
968 969 970
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];
971

972
	return scnprintf(buf, PAGE_SIZE, "%s\n",
973
	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
974 975 976
}

static ssize_t
977 978
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
979
{
980 981
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
982 983
	uint32_t sn;

984
	if (IS_QLAFX00(vha->hw)) {
985
		return scnprintf(buf, PAGE_SIZE, "%s\n",
986 987
		    vha->hw->mr.serial_num);
	} else if (IS_FWI2_CAPABLE(ha)) {
988 989
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
		return strlen(strcat(buf, "\n"));
990
	}
991

992
	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
993
	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
994 995 996 997
	    sn % 100000);
}

static ssize_t
998 999
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
1000
{
1001
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1002
	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1003 1004 1005
}

static ssize_t
1006 1007
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
1008
{
1009 1010
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1011 1012

	if (IS_QLAFX00(vha->hw))
1013
		return scnprintf(buf, PAGE_SIZE, "%s\n",
1014 1015
		    vha->hw->mr.hw_version);

1016
	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1017 1018 1019 1020 1021
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
1022 1023
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
1024
{
1025
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1026

1027
	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1028 1029 1030
}

static ssize_t
1031 1032
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
1033
{
1034
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1035
	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1036 1037 1038
}

static ssize_t
1039 1040
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
1041
{
1042
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1043 1044
	char pci_info[30];

1045
	return scnprintf(buf, PAGE_SIZE, "%s\n",
1046
	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1047 1048 1049
}

static ssize_t
1050 1051
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
			char *buf)
1052
{
1053 1054
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1055 1056
	int len = 0;

1057
	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1058 1059
	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
	    vha->device_flags & DFLG_NO_CABLE)
1060
		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1061
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1062
	    qla2x00_reset_active(vha))
1063
		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1064
	else {
1065
		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1066 1067 1068

		switch (ha->current_topology) {
		case ISP_CFG_NL:
1069
			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1070 1071
			break;
		case ISP_CFG_FL:
1072
			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1073 1074
			break;
		case ISP_CFG_N:
1075
			len += scnprintf(buf + len, PAGE_SIZE-len,
1076 1077 1078
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
1079
			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1080 1081
			break;
		default:
1082
			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1083 1084 1085 1086 1087 1088
			break;
		}
	}
	return len;
}

1089
static ssize_t
1090 1091
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
1092
{
1093
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1094 1095
	int len = 0;

1096
	switch (vha->hw->zio_mode) {
1097
	case QLA_ZIO_MODE_6:
1098
		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1099 1100
		break;
	case QLA_ZIO_DISABLED:
1101
		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1102 1103 1104 1105 1106 1107
		break;
	}
	return len;
}

static ssize_t
1108 1109
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
1110
{
1111 1112
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1113 1114 1115
	int val = 0;
	uint16_t zio_mode;

1116 1117 1118
	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

1119 1120 1121
	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

1122
	if (val)
1123
		zio_mode = QLA_ZIO_MODE_6;
1124
	else
1125 1126 1127 1128 1129
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
1130
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1131 1132 1133 1134 1135
	}
	return strlen(buf);
}

static ssize_t
1136 1137
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
1138
{
1139
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1140

1141
	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1142 1143 1144
}

static ssize_t
1145 1146
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
1147
{
1148
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1149 1150 1151 1152 1153 1154 1155 1156 1157
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
1158
	vha->hw->zio_timer = zio_timer;
1159 1160 1161 1162

	return strlen(buf);
}

1163
static ssize_t
1164 1165
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
1166
{
1167
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1168 1169
	int len = 0;

1170
	if (vha->hw->beacon_blink_led)
1171
		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1172
	else
1173
		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1174 1175 1176 1177
	return len;
}

static ssize_t
1178 1179
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
1180
{
1181 1182
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1183 1184 1185 1186 1187 1188
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

1189
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1190
		ql_log(ql_log_warn, vha, 0x707a,
1191 1192 1193 1194 1195 1196 1197 1198
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
1199
		rval = ha->isp_ops->beacon_on(vha);
1200
	else
1201
		rval = ha->isp_ops->beacon_off(vha);
1202 1203 1204 1205 1206 1207 1208

	if (rval != QLA_SUCCESS)
		count = 0;

	return count;
}

1209
static ssize_t
1210 1211
qla2x00_optrom_bios_version_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
1212
{
1213 1214
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1215
	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1216 1217 1218 1219
	    ha->bios_revision[0]);
}

static ssize_t
1220 1221
qla2x00_optrom_efi_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
1222
{
1223 1224
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1225
	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1226 1227 1228 1229
	    ha->efi_revision[0]);
}

static ssize_t
1230 1231
qla2x00_optrom_fcode_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
1232
{
1233 1234
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1235
	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1236 1237 1238 1239
	    ha->fcode_revision[0]);
}

static ssize_t
1240 1241
qla2x00_optrom_fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
1242
{
1243 1244
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
1245
	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1246 1247 1248 1249
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

1250 1251 1252 1253 1254 1255 1256
static ssize_t
qla2x00_optrom_gold_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

1257
	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1258
		return scnprintf(buf, PAGE_SIZE, "\n");
1259

1260
	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1261 1262 1263 1264
	    ha->gold_fw_version[0], ha->gold_fw_version[1],
	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
}

1265 1266 1267 1268
static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
1269
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1270
	return scnprintf(buf, PAGE_SIZE, "%d\n",
1271
	    vha->qla_stats.total_isp_aborts);
1272 1273
}

1274 1275 1276 1277 1278 1279 1280 1281 1282
static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int rval = QLA_SUCCESS;
	uint16_t status[2] = {0, 0};
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

1283
	if (!IS_QLA84XX(ha))
1284
		return scnprintf(buf, PAGE_SIZE, "\n");
1285

1286
	if (ha->cs84xx->op_fw_version == 0)
1287
		rval = qla84xx_verify_chip(vha, status);
1288 1289

	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1290
		return scnprintf(buf, PAGE_SIZE, "%u\n",
1291 1292
			(uint32_t)ha->cs84xx->op_fw_version);

1293
	return scnprintf(buf, PAGE_SIZE, "\n");
1294 1295
}

1296 1297 1298 1299 1300 1301 1302
static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

1303 1304
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
	    !IS_QLA27XX(ha))
1305
		return scnprintf(buf, PAGE_SIZE, "\n");
1306

1307
	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1308
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

1319
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1320
		return scnprintf(buf, PAGE_SIZE, "\n");
1321

1322
	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1323
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1324 1325
}

1326 1327 1328 1329 1330 1331 1332
static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

1333
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1334 1335
}

1336 1337 1338 1339 1340 1341
static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

1342
	if (!IS_CNA_CAPABLE(vha->hw))
1343
		return scnprintf(buf, PAGE_SIZE, "\n");
1344

1345
	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1346 1347 1348 1349 1350 1351 1352 1353
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

1354
	if (!IS_CNA_CAPABLE(vha->hw))
1355
		return scnprintf(buf, PAGE_SIZE, "\n");
1356

1357
	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1358 1359
}

1360 1361 1362 1363 1364 1365
static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

1366
	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1367 1368
}

1369 1370 1371 1372 1373
static ssize_t
qla2x00_thermal_temp_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1374
	uint16_t temp = 0;
1375

1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386
	if (qla2x00_reset_active(vha)) {
		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
		goto done;
	}

	if (vha->hw->flags.eeh_busy) {
		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
		goto done;
	}

	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
1387
		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1388

1389
done:
1390
	return scnprintf(buf, PAGE_SIZE, "\n");
1391 1392
}

1393 1394 1395 1396 1397
static ssize_t
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1398
	int rval = QLA_FUNCTION_FAILED;
1399
	uint16_t state[6];
1400 1401 1402 1403
	uint32_t pstate;

	if (IS_QLAFX00(vha->hw)) {
		pstate = qlafx00_fw_state_show(dev, attr, buf);
1404
		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1405
	}
1406

1407
	if (qla2x00_reset_active(vha))
1408 1409
		ql_log(ql_log_warn, vha, 0x707c,
		    "ISP reset active.\n");
1410
	else if (!vha->hw->flags.eeh_busy)
1411
		rval = qla2x00_get_firmware_state(vha, state);
1412 1413 1414
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

1415 1416
	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
	    state[0], state[1], state[2], state[3], state[4], state[5]);
1417 1418
}

1419 1420 1421 1422 1423 1424 1425
static ssize_t
qla2x00_diag_requests_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_BIDI_CAPABLE(vha->hw))
1426
		return scnprintf(buf, PAGE_SIZE, "\n");
1427

1428
	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1429 1430 1431 1432 1433 1434 1435 1436 1437
}

static ssize_t
qla2x00_diag_megabytes_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_BIDI_CAPABLE(vha->hw))
1438
		return scnprintf(buf, PAGE_SIZE, "\n");
1439

1440
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1441 1442 1443
	    vha->bidi_stats.transfer_bytes >> 20);
}

1444 1445 1446 1447 1448 1449 1450 1451 1452 1453
static ssize_t
qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t size;

	if (!ha->fw_dumped)
		size = 0;
1454
	else if (IS_P3P_TYPE(ha))
1455 1456 1457 1458
		size = ha->md_template_size + ha->md_dump_size;
	else
		size = ha->fw_dump_len;

1459
	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1460 1461
}

1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492
static ssize_t
qla2x00_allow_cna_fw_dump_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_P3P_TYPE(vha->hw))
		return scnprintf(buf, PAGE_SIZE, "\n");
	else
		return scnprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->allow_cna_fw_dump ? "true" : "false");
}

static ssize_t
qla2x00_allow_cna_fw_dump_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;

	if (!IS_P3P_TYPE(vha->hw))
		return -EINVAL;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	vha->hw->allow_cna_fw_dump = val != 0;

	return strlen(buf);
}

1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506
static ssize_t
qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA27XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
}

1507 1508 1509 1510 1511 1512 1513 1514
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1515
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
		   qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
		   qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
		   qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
		   qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
		   qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
		   NULL);
1529 1530
static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
    qla2x00_optrom_gold_fw_version_show, NULL);
1531 1532
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
		   NULL);
1533 1534
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
		   NULL);
1535
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1536
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1537 1538
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
		   NULL);
1539 1540 1541
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
		   qla2x00_vn_port_mac_address_show, NULL);
1542
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1543
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1544
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1545 1546
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1547
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1548 1549 1550
static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
		   qla2x00_allow_cna_fw_dump_show,
		   qla2x00_allow_cna_fw_dump_store);
1551
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
1552 1553 1554 1555 1556 1557 1558 1559 1560 1561

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
1562
	&dev_attr_link_state,
1563 1564 1565 1566 1567 1568 1569
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
1570
	&dev_attr_84xx_fw_version,
1571
	&dev_attr_total_isp_aborts,
1572
	&dev_attr_mpi_version,
1573
	&dev_attr_phy_version,
1574
	&dev_attr_flash_block_size,
1575 1576
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
1577
	&dev_attr_fabric_param,
1578
	&dev_attr_fw_state,
1579
	&dev_attr_optrom_gold_fw_version,
1580
	&dev_attr_thermal_temp,
1581 1582
	&dev_attr_diag_requests,
	&dev_attr_diag_megabytes,
1583
	&dev_attr_fw_dump_size,
1584
	&dev_attr_allow_cna_fw_dump,
1585
	&dev_attr_pep_version,
1586 1587 1588
	NULL,
};

已提交
1589 1590 1591 1592 1593
/* Host attributes. */

static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
1594
	scsi_qla_host_t *vha = shost_priv(shost);
已提交
1595

1596 1597
	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
已提交
1598 1599
}

1600 1601 1602
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
1603 1604
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
					(shost_priv(shost)))->hw;
1605
	u32 speed = FC_PORTSPEED_UNKNOWN;
1606

1607 1608 1609 1610 1611
	if (IS_QLAFX00(ha)) {
		qlafx00_get_host_speed(shost);
		return;
	}

1612
	switch (ha->link_data_rate) {
1613
	case PORT_SPEED_1GB:
1614
		speed = FC_PORTSPEED_1GBIT;
1615
		break;
1616
	case PORT_SPEED_2GB:
1617
		speed = FC_PORTSPEED_2GBIT;
1618
		break;
1619
	case PORT_SPEED_4GB:
1620
		speed = FC_PORTSPEED_4GBIT;
1621
		break;
1622
	case PORT_SPEED_8GB:
1623
		speed = FC_PORTSPEED_8GBIT;
1624
		break;
1625 1626 1627
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
1628 1629 1630
	case PORT_SPEED_16GB:
		speed = FC_PORTSPEED_16GBIT;
		break;
1631 1632 1633
	case PORT_SPEED_32GB:
		speed = FC_PORTSPEED_32GBIT;
		break;
1634 1635 1636 1637
	}
	fc_host_speed(shost) = speed;
}

1638 1639 1640
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
1641
	scsi_qla_host_t *vha = shost_priv(shost);
1642 1643
	uint32_t port_type = FC_PORTTYPE_UNKNOWN;

1644
	if (vha->vp_idx) {
1645 1646 1647
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
1648
	switch (vha->hw->current_topology) {
1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	}
	fc_host_port_type(shost) = port_type;
}

已提交
1665 1666 1667 1668
static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1669
	scsi_qla_host_t *vha = shost_priv(host);
1670
	fc_port_t *fcport;
1671
	u64 node_name = 0;
已提交
1672

1673
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1674 1675
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
1676
			node_name = wwn_to_u64(fcport->node_name);
1677 1678 1679 1680
			break;
		}
	}

1681
	fc_starget_node_name(starget) = node_name;
已提交
1682 1683 1684 1685 1686 1687
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1688
	scsi_qla_host_t *vha = shost_priv(host);
1689
	fc_port_t *fcport;
1690
	u64 port_name = 0;
已提交
1691

1692
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1693 1694
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
1695
			port_name = wwn_to_u64(fcport->port_name);
1696 1697 1698 1699
			break;
		}
	}

1700
	fc_starget_port_name(starget) = port_name;
已提交
1701 1702 1703 1704 1705 1706
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1707
	scsi_qla_host_t *vha = shost_priv(host);
1708 1709 1710
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

1711
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1712 1713
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
1714 1715 1716 1717 1718
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}
已提交
1719 1720 1721 1722 1723 1724 1725 1726

	fc_starget_port_id(starget) = port_id;
}

static void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
1727
		rport->dev_loss_tmo = timeout;
已提交
1728
	else
1729
		rport->dev_loss_tmo = 1;
已提交
1730 1731
}

1732 1733 1734 1735 1736
static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1737
	unsigned long flags;
1738

1739 1740 1741
	if (!fcport)
		return;

1742 1743
	/* Now that the rport has been deleted, set the fcport state to
	   FCS_DEVICE_DEAD */
1744
	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1745

1746 1747 1748 1749
	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
1750
	spin_lock_irqsave(host->host_lock, flags);
1751
	fcport->rport = fcport->drport = NULL;
1752
	*((fc_port_t **)rport->dd_data) = NULL;
1753
	spin_unlock_irqrestore(host->host_lock, flags);
1754 1755 1756 1757 1758 1759 1760 1761

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
1762 1763 1764 1765 1766 1767 1768
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

1769 1770 1771
	if (!fcport)
		return;

1772 1773 1774
	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

1775 1776 1777 1778
	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
1779 1780 1781 1782
	/*
	 * At this point all fcport's software-states are cleared.  Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
1783
	if (fcport->loop_id != FC_NO_LOOP_ID) {
1784 1785 1786 1787 1788 1789 1790
		if (IS_FWI2_CAPABLE(fcport->vha->hw))
			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		else
			qla2x00_port_logout(fcport->vha, fcport);
	}
1791 1792
}

1793 1794 1795
static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
1796
	scsi_qla_host_t *vha = shost_priv(shost);
1797

1798 1799 1800
	if (IS_QLAFX00(vha->hw))
		return 0;

1801
	qla2x00_loop_reset(vha);
1802 1803 1804
	return 0;
}

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *pfc_host_stat;

	pfc_host_stat = &vha->fc_host_stat;
	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

	if (IS_QLAFX00(vha->hw))
		goto done;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	if (qla2x00_reset_active(vha))
		goto done;

	stats = dma_alloc_coherent(&ha->pdev->dev,
	    sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
	if (stats == NULL) {
		ql_log(ql_log_warn, vha, 0x707d,
		    "Failed to allocate memory for stats.\n");
		goto done;
	}
	memset(stats, 0, sizeof(struct link_statistics));

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
						stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		pfc_host_stat->lip_count = stats->lip_cnt;
		pfc_host_stat->tx_frames = stats->tx_frames;
		pfc_host_stat->rx_frames = stats->rx_frames;
		pfc_host_stat->dumped_frames = stats->discarded_frames;
		pfc_host_stat->nos_count = stats->nos_rcvd;
		pfc_host_stat->error_frames =
			stats->dropped_frames + stats->discarded_frames;
		pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
		pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
	}
	pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
	pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
	pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
	pfc_host_stat->seconds_since_last_reset =
		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
	do_div(pfc_host_stat->seconds_since_last_reset, HZ);

done_free:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
	    stats, stats_dma);
done:
	return pfc_host_stat;
}

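/*
 * FC transport .reset_fc_host_stats hook: clear the cached host statistics
 * and restart the "seconds since last reset" timestamp.
 */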
static void
qla2x00_reset_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
}

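/* Populate the fc_host symbolic node name via qla2x00_get_sym_node_name(). */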
static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
	    sizeof(fc_host_symbolic_name(shost)));
}

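/*
 * FC transport .set_host_system_hostname hook: flag the DPC thread to
 * re-register FDMI data so the fabric sees the updated hostname.
 */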
static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

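/*
 * Report the fabric (switch) node name; an all-ones WWN is used as the
 * "unknown" value when no switch has been discovered.
 */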
static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF};
	u64 fabric_name = wwn_to_u64(node_name);

	if (vha->device_flags & SWITCH_FOUND)
		fabric_name = wwn_to_u64(vha->fabric_node_name);

	fc_host_fabric_name(shost) = fabric_name;
}

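/* Map the physical port's loop state onto the generic FC port states. */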
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online) {
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		return;
	}

	switch (atomic_read(&base_vha->loop_state)) {
	case LOOP_UPDATE:
		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		break;
	case LOOP_DOWN:
		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		else
			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_DEAD:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_READY:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	default:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
}

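/*
 * FC transport .vport_create hook: allocate and register an NPIV virtual
 * host, inherit DIF/DIX and speed capabilities from the physical port, and,
 * where QoS settings exist in the NVRAM NPIV table, attach a dedicated
 * request queue to the vport.
 */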
static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int	ret = 0;
	uint8_t	qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	uint16_t options = 0;
	int	cnt;
	struct req_que *req = ha->req_q_map[0];

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x707e,
		    "Vport sanity check failed, status %x\n", ret);
		return (ret);
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	ql_log(ql_log_info, vha, 0x7080,
	    "VP entry id %d assigned.\n", vha->vp_idx);

	/* initialized vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		ql_dbg(ql_dbg_user, vha, 0x7081,
		    "Vport loop state is not UP.\n");
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;
			vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_user, vha, 0x7082,
			    "Registered for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(vha->host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(vha->host, guard);
		} else
			vha->flags.difdix_supported = 0;
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
				   &ha->pdev->dev)) {
		ql_dbg(ql_dbg_user, vha, 0x7083,
		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
		fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
		fc_host_supported_speeds(base_vha->host);

	qlt_vport_create(vha, ha);
	qla24xx_vport_disable(fc_vport, disable);

	if (ha->flags.cpu_affinity_enabled) {
		req = ha->req_q_map[1];
		ql_dbg(ql_dbg_multiq, vha, 0xc000,
		    "Request queue %p attached with "
		    "VP[%d], cpu affinity =%d\n",
		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
		goto vport_queue;
	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
		goto vport_queue;
	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
					8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}

	if (qos) {
		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
			qos);
		if (!ret)
			ql_log(ql_log_warn, vha, 0x7084,
			    "Can't create request queue for VP[%d]\n",
			    vha->vp_idx);
		else {
			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Request Que:%d QoS: %d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Request Que:%d QoS: %d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			req = ha->req_q_map[ret];
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

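/*
 * FC transport .vport_delete hook: quiesce and tear down the virtual host,
 * then release its fcports, vp_idx and, if one was created, its request
 * queue.
 */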
static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);

	vha->flags.delete_progress = 1;

	qlt_remove_target(ha, vha);

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	/* Allow timer to run to drain queued items, when removing vp */
	qla24xx_deallocate_vp_id(vha);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		ql_dbg(ql_dbg_user, vha, 0x7086,
		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
	}

	BUG_ON(atomic_read(&vha->vref_count));

	qla2x00_free_fcports(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x7087,
			    "Queue delete failed.\n");
	}

	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
	scsi_host_put(vha->host);
	return 0;
}

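/* FC transport .vport_disable hook: enable or disable the virtual port. */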
static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id  = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id  = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

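/*
 * Populate the fc_host attributes (WWNs, supported classes, NPIV limits and
 * supported speeds) for a newly initialized host, based on the ISP type.
 */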
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_CNA_CAPABLE(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA2031(ha))
		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
		    FC_PORTSPEED_4GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLAFX00(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA27XX(ha))
		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
		    FC_PORTSPEED_8GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}