qla_isr.c 90.0 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
A
Andrew Vasquez 已提交
2
 * QLogic Fibre Channel HBA Driver
3
 * Copyright (c)  2003-2014 QLogic Corporation
L
Linus Torvalds 已提交
4
 *
A
Andrew Vasquez 已提交
5
 * See LICENSE.qla2xxx for copyright and licensing details.
L
Linus Torvalds 已提交
6 7
 */
#include "qla_def.h"
8
#include "qla_target.h"
L
Linus Torvalds 已提交
9

10
#include <linux/delay.h>
11
#include <linux/slab.h>
12
#include <linux/t10-pi.h>
13
#include <scsi/scsi_tcq.h>
14
#include <scsi/scsi_bsg_fc.h>
15
#include <scsi/scsi_eh.h>
16

L
Linus Torvalds 已提交
17
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
18
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 21
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
22 23 24 25
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
    const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);

26

L
Linus Torvalds 已提交
27 28 29 30 31 32 33 34 35 36
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number (unused here; context comes from @dev_id)
 * @dev_id: SCSI driver HA context (a struct rsp_que *)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Runs entirely under ha->hardware_lock.  The loop is bounded (50
 * iterations) so a stuck interrupt source cannot wedge the CPU.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;		/* accumulates MBX_INTERRUPT */
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		/* Defensive: shared-IRQ registration gave us a bad cookie. */
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		/* All-ones readback means the adapter fell off the bus. */
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			/* Read-back presumably flushes the posted write. */
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			/* No RISC interrupt pending -- nothing left to do. */
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			/* Semaphore set: mailbox registers hold valid data. */
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				/* 0x4000-0x7fff: mailbox command completion. */
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				/* 0x8000-0xbfff: asynchronous event. */
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			/* No semaphore: response-queue entries are ready. */
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	/* Wake any waiter sleeping on a mailbox command completion. */
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

120
bool
121
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
122 123
{
	/* Check for PCI disconnection */
124
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
125
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
126 127
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
128 129 130 131 132 133 134
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
135 136 137 138 139
		return true;
	} else
		return false;
}

140 141 142 143 144 145
/*
 * 16-bit variant of the disconnect check: widen the value with all-ones
 * in the upper half so a 0xffff readback maps to the 32-bit sentinel.
 */
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	uint32_t widened = 0xffff0000 | reg;

	return qla2x00_check_reg32_for_disconnect(vha, widened);
}

L
Linus Torvalds 已提交
146 147 148 149 150 151 152 153 154 155
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number (unused here; context comes from @dev_id)
 * @dev_id: SCSI driver HA context (a struct rsp_que *)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Unlike the 2100 path, interrupt cause is decoded from the low byte of
 * the 32-bit host_status register rather than a semaphore bit.  Runs
 * under ha->hardware_lock with a bounded (50-iteration) service loop.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;		/* accumulates MBX_INTERRUPT */
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		/* Defensive: shared-IRQ registration gave us a bad cookie. */
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		/* All-ones readback means the adapter fell off the bus. */
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			/* Read-back presumably flushes the posted write. */
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			/* No RISC interrupt pending -- nothing left to do. */
			break;

		/* Low byte of host_status selects the interrupt cause. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion; mb0 in high word. */
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			/* Asynchronous event; gather mailbox registers. */
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			/* Response queue has new entries. */
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			/* Fast-post: single 16-bit handle completion. */
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			/* Fast-post: SCSI completion, handle in mb1/mb2. */
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	/* Wake any waiter sleeping on a mailbox command completion. */
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register (completion status of the mailbox command)
 *
 * Copies the outbound mailbox registers the pending command asked for
 * (ha->mcp->in_mb bitmask) into ha->mailbox_out[] and flags the
 * completion for the sleeping issuer.  Caller holds hardware_lock.
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;		/* bitmask: which mboxes to read */
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		/* No command context -- fall back to reading everything. */
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	/* mb0 consumed above; shift so BIT_0 tracks the current index. */
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 mailboxes 8+ live at a different offset. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/* Mailboxes 4/5 need a debounced read on this hardware. */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

301 302 303 304 305 306 307
/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication (IDC) AEN.
 * @vha:   SCSI driver HA context
 * @aen:   IDC event code (MBA_IDC_COMPLETE / _NOTIFY / _TIME_EXT)
 * @descr: event descriptor word (encodes the ACK timeout for NOTIFY,
 *         or the timeout extension for TIME_EXT)
 *
 * Snapshots mailbox1..7 from the chip-family-specific register block
 * and dispatches on the event type.  Silently returns on chip families
 * without IDC mailboxes.
 */
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		/* No IDC support on this chip family. */
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			/* Wake a DCBX waiter on the physical port only. */
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		/* ACK is posted to a work queue; it may sleep. */
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

362
#define LS_UNKNOWN	2
363 364
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
365
{
366 367
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
368
	};
369
#define	QLA_LAST_SPEED	7
370 371

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
372 373
		return link_speeds[0];
	else if (speed == 0x13)
374 375
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
376 377 378
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
379 380
}

381
/*
 * qla83xx_handle_8200_aen() - Decode an ISP83xx 8200 asynchronous event.
 * @vha: SCSI driver HA context
 * @mb:  mailbox registers captured at interrupt time (see layout below)
 *
 * Logs the decoded Peg-Halt / device-state registers and, depending on
 * the reported error level, schedules NIC-core reset or marks the core
 * unrecoverable via qla83xx_schedule_work().
 */
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
				IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 *  - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 *  - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				/* Firmware recovered on its own; log only. */
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "iteself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additonal_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 *  - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	  SFP Status 0x0 = SFP+ transceiver not expected
			 *	  SFP Status 0x1 = SFP+ transceiver not present
			 *	  SFP Status 0x2 = SFP+ transceiver invalid
			 *	  SFP Status 0x3 = SFP+ transceiver present and
			 *	  valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	  SFP info 0x0 = Unregocnized transceiver for
			 *	  Ethernet
			 *	  SFP info 0x1 = SFP+ brand validation failed
			 *	  SFP info 0x2 = SFP+ speed validation failed
			 *	  SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	  DCBX Status 0x0 = DCBX Disabled
			 *	  DCBX Status 0x1 = DCBX Enabled
			 *	  DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additonal_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_statis=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additonal_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_state=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		/* The reset owner drives the IDC state machine itself. */
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567
/*
 * Return 1 if @rscn_entry matches the 24-bit D_ID of any virtual port
 * on this adapter, 0 otherwise.  The vport list is scanned under
 * vport_slock.
 */
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags;
	int found = 0;

	/* No virtual hosts configured -- the D_ID cannot be a vport's. */
	if (!ha->num_vhosts)
		return 0;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->d_id.b24 == rscn_entry) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return found;
}

568 569 570 571 572 573 574 575 576 577 578
/*
 * Linear search of the host's fcport list for the given loop ID.
 * Returns the matching fc_port_t, or NULL if no port has that ID.
 * NOTE(review): caller is expected to hold whatever lock protects
 * vp_fcports -- confirm at call sites.
 */
static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *port;

	list_for_each_entry(port, &vha->vp_fcports, list) {
		if (port->loop_id == loop_id)
			return port;
	}

	return NULL;
}

L
Linus Torvalds 已提交
579 580 581
/**
 * qla2x00_async_event() - Process aynchronous events.
 * @ha: SCSI driver HA context
582
 * @mb: Mailbox registers (0 - 3)
L
Linus Torvalds 已提交
583
 */
584
void
585
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
L
Linus Torvalds 已提交
586 587
{
	uint16_t	handle_cnt;
588
	uint16_t	cnt, mbx;
L
Linus Torvalds 已提交
589
	uint32_t	handles[5];
590
	struct qla_hw_data *ha = vha->hw;
591
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
592
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
593
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
594
	uint32_t	rscn_entry, host_pid;
595
	unsigned long	flags;
596
	fc_port_t	*fcport = NULL;
L
Linus Torvalds 已提交
597 598 599

	/* Setup to process RIO completion. */
	handle_cnt = 0;
600
	if (IS_CNA_CAPABLE(ha))
601
		goto skip_rio;
L
Linus Torvalds 已提交
602 603
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
604
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
L
Linus Torvalds 已提交
605 606 607
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
608
		handles[0] = mb[1];
L
Linus Torvalds 已提交
609 610 611 612
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
613 614
		handles[0] = mb[1];
		handles[1] = mb[2];
L
Linus Torvalds 已提交
615 616 617 618
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
619 620 621
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
622 623 624 625
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
626 627 628
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
629 630 631 632 633
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
634 635 636
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
637 638 639 640 641 642
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
643
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
L
Linus Torvalds 已提交
644 645 646 647 648 649 650 651 652
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
653
skip_rio:
L
Linus Torvalds 已提交
654 655
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
656
		if (!vha->flags.online)
L
Linus Torvalds 已提交
657 658 659
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
660 661
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
L
Linus Torvalds 已提交
662 663 664
		break;

	case MBA_RESET:			/* Reset */
665 666
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");
L
Linus Torvalds 已提交
667

668
		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
669 670 671
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
672
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
673
			RD_REG_WORD(&reg24->mailbox7) : 0;
674
		ql_log(ql_log_warn, vha, 0x5003,
675 676
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
L
Linus Torvalds 已提交
677

678
		ha->isp_ops->fw_dump(vha, 1);
L
Linus Torvalds 已提交
679

680
		if (IS_FWI2_CAPABLE(ha)) {
681
			if (mb[1] == 0 && mb[2] == 0) {
682
				ql_log(ql_log_fatal, vha, 0x5004,
683 684
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
685
				vha->flags.online = 0;
686
				vha->device_flags |= DFLG_DEV_FAILED;
687
			} else {
L
Lucas De Marchi 已提交
688
				/* Check to see if MPI timeout occurred */
689
				if ((mbx & MBX_3) && (ha->port_no == 0))
690 691 692
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

693
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
694
			}
695
		} else if (mb[1] == 0) {
696
			ql_log(ql_log_fatal, vha, 0x5005,
L
Linus Torvalds 已提交
697 698
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
699
			vha->flags.online = 0;
700
			vha->device_flags |= DFLG_DEV_FAILED;
L
Linus Torvalds 已提交
701
		} else
702
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
703 704 705
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
706 707
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n",  mb[1]);
L
Linus Torvalds 已提交
708

709
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
710 711 712
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
713
		ql_log(ql_log_warn, vha, 0x5007,
714
		    "ISP Response Transfer Error (%x).\n", mb[1]);
L
Linus Torvalds 已提交
715

716
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
717 718 719
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
720
		ql_dbg(ql_dbg_async, vha, 0x5008,
721 722
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;
L
Linus Torvalds 已提交
723

724
	case MBA_LOOP_INIT_ERR:
725
		ql_log(ql_log_warn, vha, 0x5090,
726 727 728
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
729
		break;
730

L
Linus Torvalds 已提交
731
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
732
		ql_dbg(ql_dbg_async, vha, 0x5009,
733
		    "LIP occurred (%x).\n", mb[1]);
L
Linus Torvalds 已提交
734

735 736 737 738
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
739 740
		}

741 742 743
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
744 745
		}

746 747
		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
748

749 750
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
L
Linus Torvalds 已提交
751 752 753
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
754
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
755
			ha->link_data_rate = PORT_SPEED_1GB;
756
		else
L
Linus Torvalds 已提交
757 758
			ha->link_data_rate = mb[1];

759
		ql_log(ql_log_info, vha, 0x500a,
760
		    "LOOP UP detected (%s Gbps).\n",
761
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
L
Linus Torvalds 已提交
762

763 764
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
L
Linus Torvalds 已提交
765 766 767
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
768 769
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
770 771
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
772
		ql_log(ql_log_info, vha, 0x500b,
773 774
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);
L
Linus Torvalds 已提交
775

776 777 778
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
779 780 781
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
782
			 * Restore for Physical Port only
783
			 */
784 785 786 787 788 789 790 791 792 793 794 795 796
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x0144, "LOOP DOWN detected,"
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
797 798
			}

799 800
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
801 802
		}

803 804 805
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
806 807
		}

808
		vha->flags.management_server_logged_in = 0;
809
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
810
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
L
Linus Torvalds 已提交
811 812 813
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
814
		ql_dbg(ql_dbg_async, vha, 0x500c,
815
		    "LIP reset occurred (%x).\n", mb[1]);
L
Linus Torvalds 已提交
816

817 818 819 820
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
821 822
		}

823 824 825
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
826 827
		}

828
		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
829 830

		ha->operating_mode = LOOP;
831 832
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
L
Linus Torvalds 已提交
833 834
		break;

835
	/* case MBA_DCBX_COMPLETE: */
L
Linus Torvalds 已提交
836 837 838 839
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

840
		if (IS_CNA_CAPABLE(ha)) {
841 842 843
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
844
			if (ha->notify_dcbx_comp && !vha->vp_idx)
845 846 847
				complete(&ha->dcbx_comp);

		} else
848 849
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");
L
Linus Torvalds 已提交
850 851 852 853 854

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
855 856 857 858
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
L
Linus Torvalds 已提交
859
				    LOOP_DOWN_TIME);
860
			qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
861 862
		}

863 864 865
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
866 867
		}

868 869 870 871 872
		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
873 874

		ha->flags.gpsc_supported = 1;
875
		vha->flags.management_server_logged_in = 0;
L
Linus Torvalds 已提交
876 877 878 879 880 881
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

882
		ql_dbg(ql_dbg_async, vha, 0x500f,
L
Linus Torvalds 已提交
883 884
		    "Configuration change detected: value=%x.\n", mb[1]);

885 886 887 888
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
L
Linus Torvalds 已提交
889
				    LOOP_DOWN_TIME);
890
			qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
891 892
		}

893 894 895
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
896 897
		}

898 899
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
L
Linus Torvalds 已提交
900 901 902
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
903 904 905 906 907 908 909 910 911 912 913 914 915 916 917
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
918 919 920 921
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;
922

923
		if (mb[2] == 0x7) {
924
			ql_dbg(ql_dbg_async, vha, 0x5010,
925 926
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
927
			    mb[1], mb[2], mb[3]);
928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944

			if (mb[1] == 0xffff)
				goto global_port_update;

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			break;

global_port_update:
945 946 947 948 949 950 951 952 953 954 955 956
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
957
				qla2x00_mark_all_devices_lost(vha, 1);
958 959 960 961 962 963 964
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

L
Linus Torvalds 已提交
965
		/*
966
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
L
Linus Torvalds 已提交
967 968 969
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
970
		atomic_set(&vha->loop_down_timer, 0);
971 972
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
973 974 975
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
976 977

			qlt_async_event(mb[0], vha, mb);
L
Linus Torvalds 已提交
978 979 980
			break;
		}

981 982 983
		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
984 985 986 987

		/*
		 * Mark all devices as missing so we will login again.
		 */
988
		atomic_set(&vha->loop_state, LOOP_UP);
L
Linus Torvalds 已提交
989

990
		qla2x00_mark_all_devices_lost(vha, 1);
L
Linus Torvalds 已提交
991

992 993 994
		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

995 996
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
997
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
998 999

		qlt_async_event(mb[0], vha, mb);
L
Linus Torvalds 已提交
1000 1001 1002
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
1003
		/* Check if the Vport has issued a SCR */
1004
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1005 1006
			break;
		/* Only handle SCNs for our Vport index. */
1007
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1008
			break;
1009

1010 1011 1012
		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1013

1014
		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1015 1016
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
L
Linus Torvalds 已提交
1017
		if (rscn_entry == host_pid) {
1018 1019 1020
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
L
Linus Torvalds 已提交
1021 1022 1023
			break;
		}

1024 1025
		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
L
Linus Torvalds 已提交
1026

1027 1028 1029 1030
		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
		/*
		 * Search for the rport related to this RSCN entry and mark it
		 * as lost.
		 */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				continue;
			if (fcport->d_id.b24 == rscn_entry) {
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				break;
			}
		}

1044 1045
		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
L
Linus Torvalds 已提交
1046

1047 1048 1049
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
L
Linus Torvalds 已提交
1050 1051 1052 1053
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
1054 1055
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");
L
Linus Torvalds 已提交
1056

1057
		if (IS_FWI2_CAPABLE(ha))
1058
			qla24xx_process_response_queue(vha, rsp);
1059
		else
1060
			qla2x00_process_response_queue(rsp);
L
Linus Torvalds 已提交
1061
		break;
1062 1063

	case MBA_DISCARD_RND_FRAME:
1064 1065 1066
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1067
		break;
1068 1069

	case MBA_TRACE_NOTIFICATION:
1070 1071
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1072
		break;
1073 1074

	case MBA_ISP84XX_ALERT:
1075 1076 1077
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1078 1079 1080 1081

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
1082 1083 1084
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
1085 1086 1087
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1088 1089 1090
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
1091 1092 1093
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1094 1095 1096
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
1097 1098 1099 1100
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
1101 1102 1103
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
1104 1105
			break;
		default:
1106 1107
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1108 1109 1110 1111
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
1112
	case MBA_DCBX_START:
1113 1114 1115
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1116 1117
		break;
	case MBA_DCBX_PARAM_UPDATE:
1118 1119 1120
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1121 1122
		break;
	case MBA_FCF_CONF_ERR:
1123 1124 1125
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1126 1127
		break;
	case MBA_IDC_NOTIFY:
1128
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1129 1130 1131 1132
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1133
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1134 1135 1136 1137 1138 1139
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
1140 1141
				qla2xxx_wake_dpc(vha);
			}
1142
		}
1143
	case MBA_IDC_COMPLETE:
1144
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
1145 1146
			complete(&ha->lb_portup_comp);
		/* Fallthru */
1147
	case MBA_IDC_TIME_EXT:
1148 1149
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
1150 1151 1152 1153 1154 1155 1156 1157 1158
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
1159
		break;
1160

1161 1162
	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
1163
		    "D-Port Diagnostics: %04x result=%s\n",
1164
		    mb[0],
1165
		    mb[1] == 0 ? "start" :
1166 1167
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
1168 1169
		break;

1170 1171 1172 1173 1174 1175 1176
	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

1177 1178 1179 1180
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1181
	}
1182

1183 1184
	qlt_async_event(mb[0], vha, mb);

1185
	if (!vha->vp_idx && ha->num_vhosts)
1186
		qla2x00_alert_all_vps(rsp, mb);
L
Linus Torvalds 已提交
1187 1188 1189 1190 1191 1192 1193
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
1194
void
1195
qla2x00_process_completed_request(struct scsi_qla_host *vha,
1196
				  struct req_que *req, uint32_t index)
L
Linus Torvalds 已提交
1197 1198
{
	srb_t *sp;
1199
	struct qla_hw_data *ha = vha->hw;
L
Linus Torvalds 已提交
1200 1201

	/* Validate handle. */
1202
	if (index >= req->num_outstanding_cmds) {
1203 1204
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);
L
Linus Torvalds 已提交
1205

1206
		if (IS_P3P_TYPE(ha))
1207 1208 1209
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1210 1211 1212
		return;
	}

1213
	sp = req->outstanding_cmds[index];
L
Linus Torvalds 已提交
1214 1215
	if (sp) {
		/* Free outstanding command slot. */
1216
		req->outstanding_cmds[index] = NULL;
L
Linus Torvalds 已提交
1217 1218

		/* Save ISP completion status */
1219
		sp->done(ha, sp, DID_OK << 16);
L
Linus Torvalds 已提交
1220
	} else {
1221
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
L
Linus Torvalds 已提交
1222

1223
		if (IS_P3P_TYPE(ha))
1224 1225 1226
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1227 1228 1229
	}
}

1230
srb_t *
1231 1232 1233 1234 1235 1236 1237 1238 1239
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
1240
	if (index >= req->num_outstanding_cmds) {
1241 1242
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
1243
		if (IS_P3P_TYPE(ha))
1244 1245 1246
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1247 1248 1249 1250
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
1251 1252
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
1253 1254 1255
		return sp;
	}
	if (sp->handle != index) {
1256 1257
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1258 1259
		return NULL;
	}
1260

1261
	req->outstanding_cmds[index] = NULL;
1262

1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
1275
	struct srb_iocb *lio;
1276
	uint16_t *data;
1277
	uint16_t status;
1278 1279 1280 1281 1282

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

1283 1284
	lio = &sp->u.iocb_cmd;
	type = sp->name;
1285
	fcport = sp->fcport;
1286
	data = lio->u.logio.data;
1287

1288
	data[0] = MBS_COMMAND_ERROR;
1289
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1290
	    QLA_LOGIO_LOGIN_RETRIED : 0;
1291
	if (mbx->entry_status) {
1292
		ql_dbg(ql_dbg_async, vha, 0x5043,
1293
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1294
		    "entry-status=%x status=%x state-flag=%x "
1295 1296
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
1297 1298
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1299
		    le16_to_cpu(mbx->status_flags));
1300

1301
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1302
		    (uint8_t *)mbx, sizeof(*mbx));
1303

1304
		goto logio_done;
1305 1306
	}

1307
	status = le16_to_cpu(mbx->status);
1308
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1309 1310 1311
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1312
		ql_dbg(ql_dbg_async, vha, 0x5045,
1313 1314 1315 1316
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));
1317 1318

		data[0] = MBS_COMMAND_COMPLETE;
1319
		if (sp->type == SRB_LOGIN_CMD) {
1320 1321 1322
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
1323
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
1324
				fcport->flags |= FCF_FCP2_DEVICE;
1325
		}
1326
		goto logio_done;
1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

1341
	ql_log(ql_log_warn, vha, 0x5046,
1342 1343 1344 1345
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1346
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1347
	    le16_to_cpu(mbx->mb7));
1348

1349
logio_done:
1350
	sp->done(vha, sp, 0);
1351 1352
}

1353 1354 1355 1356 1357 1358 1359
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
1360
	struct bsg_job *bsg_job;
1361
	struct fc_bsg_reply *bsg_reply;
1362
	uint16_t comp_status;
1363
	int res;
1364 1365 1366 1367 1368

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

1369
	bsg_job = sp->u.bsg_job;
1370
	bsg_reply = bsg_job->reply;
1371

1372
	type = "ct pass-through";
1373 1374 1375 1376 1377 1378

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
1379
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1380 1381 1382 1383
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
1384
			res = DID_OK << 16;
1385
			bsg_reply->reply_payload_rcv_len =
1386 1387
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

1388 1389
			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
1390
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
1391
			    type, comp_status,
1392
			    bsg_reply->reply_payload_rcv_len);
1393
		} else {
1394 1395 1396
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
1397
			res = DID_ERROR << 16;
1398
			bsg_reply->reply_payload_rcv_len = 0;
1399
		}
1400
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1401
		    (uint8_t *)pkt, sizeof(*pkt));
1402
	} else {
1403
		res = DID_OK << 16;
1404
		bsg_reply->reply_payload_rcv_len =
1405 1406 1407 1408
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

1409
	sp->done(vha, sp, res);
1410 1411
}

1412 1413 1414 1415 1416 1417 1418
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
1419
	struct bsg_job *bsg_job;
1420
	struct fc_bsg_reply *bsg_reply;
1421 1422 1423
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t* fw_sts_ptr;
1424
	int res;
1425 1426 1427 1428

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
1429
	bsg_job = sp->u.bsg_job;
1430
	bsg_reply = bsg_job->reply;
1431 1432

	type = NULL;
1433
	switch (sp->type) {
1434 1435 1436 1437 1438 1439 1440
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
1441 1442 1443 1444 1445 1446
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(vha, sp, 0);
		return;
1447
	default:
1448
		ql_dbg(ql_dbg_user, vha, 0x503e,
1449
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
1460
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1461 1462 1463 1464
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
1465
			res = DID_OK << 16;
1466
			bsg_reply->reply_payload_rcv_len =
1467
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1468

1469
			ql_dbg(ql_dbg_user, vha, 0x503f,
1470
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1471
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1472
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
1473 1474
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
1475 1476 1477 1478
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		else {
1479
			ql_dbg(ql_dbg_user, vha, 0x5040,
1480
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1481
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
1482
			    type, sp->handle, comp_status,
1483 1484 1485 1486
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->error_subcode_2));
1487
			res = DID_ERROR << 16;
1488
			bsg_reply->reply_payload_rcv_len = 0;
1489 1490 1491
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		}
1492
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1493
				(uint8_t *)pkt, sizeof(*pkt));
1494 1495
	}
	else {
1496
		res =  DID_OK << 16;
1497
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1498 1499 1500
		bsg_job->reply_len = 0;
	}

1501
	sp->done(vha, sp, res);
1502 1503
}

1504 1505 1506 1507 1508 1509 1510 1511
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
1512
	struct srb_iocb *lio;
1513
	uint16_t *data;
1514 1515 1516 1517 1518 1519
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

1520 1521
	lio = &sp->u.iocb_cmd;
	type = sp->name;
1522
	fcport = sp->fcport;
1523
	data = lio->u.logio.data;
1524

1525
	data[0] = MBS_COMMAND_ERROR;
1526
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1527
		QLA_LOGIO_LOGIN_RETRIED : 0;
1528
	if (logio->entry_status) {
1529
		ql_log(ql_log_warn, fcport->vha, 0x5034,
1530
		    "Async-%s error entry - hdl=%x"
1531
		    "portid=%02x%02x%02x entry-status=%x.\n",
1532 1533 1534 1535
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1536
		    (uint8_t *)logio, sizeof(*logio));
1537

1538
		goto logio_done;
1539 1540 1541
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1542
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1543 1544 1545
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1546
		    le32_to_cpu(logio->io_parameter[0]));
1547 1548

		data[0] = MBS_COMMAND_COMPLETE;
1549
		if (sp->type != SRB_LOGIN_CMD)
1550
			goto logio_done;
1551 1552 1553 1554 1555

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
1556
				fcport->flags |= FCF_FCP2_DEVICE;
1557
		} else if (iop[0] & BIT_5)
1558
			fcport->port_type = FCT_INITIATOR;
1559

1560 1561 1562
		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

1563 1564 1565 1566 1567
		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

1568
		goto logio_done;
1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

1586
	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1587 1588
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1589
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1590 1591
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
1592
	    le32_to_cpu(logio->io_parameter[1]));
1593

1594
logio_done:
1595
	sp->done(vha, sp, 0);
1596 1597
}

1598
static void
1599
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

1612 1613
	iocb = &sp->u.iocb_cmd;
	type = sp->name;
1614
	fcport = sp->fcport;
1615
	iocb->u.tmf.data = QLA_SUCCESS;
1616 1617

	if (sts->entry_status) {
1618
		ql_log(ql_log_warn, fcport->vha, 0x5038,
1619 1620
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
1621
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1622
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
1623
		ql_log(ql_log_warn, fcport->vha, 0x5039,
1624 1625
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
1626 1627
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
1628
	    SS_RESPONSE_INFO_LEN_VALID)) {
1629 1630 1631 1632 1633 1634 1635 1636
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
B
Bart Van Assche 已提交
1637
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1638
		}
1639 1640
	}

1641
	if (iocb->u.tmf.data != QLA_SUCCESS)
1642 1643
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
1644

1645
	sp->done(vha, sp, 0);
1646 1647
}

L
Linus Torvalds 已提交
1648 1649 1650 1651 1652
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
1653
qla2x00_process_response_queue(struct rsp_que *rsp)
L
Linus Torvalds 已提交
1654
{
1655 1656
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
1657
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
L
Linus Torvalds 已提交
1658 1659 1660
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;
1661

1662
	vha = pci_get_drvdata(ha->pdev);
L
Linus Torvalds 已提交
1663

1664
	if (!vha->flags.online)
L
Linus Torvalds 已提交
1665 1666
		return;

1667 1668
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;
L
Linus Torvalds 已提交
1669

1670 1671 1672 1673
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
L
Linus Torvalds 已提交
1674
		} else {
1675
			rsp->ring_ptr++;
L
Linus Torvalds 已提交
1676 1677 1678
		}

		if (pkt->entry_status != 0) {
1679
			qla2x00_error_entry(vha, rsp, pkt);
L
Linus Torvalds 已提交
1680 1681 1682 1683 1684 1685 1686
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
1687
			qla2x00_status_entry(vha, rsp, pkt);
L
Linus Torvalds 已提交
1688 1689 1690 1691
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
1692
				qla2x00_process_completed_request(vha, rsp->req,
L
Linus Torvalds 已提交
1693 1694 1695 1696 1697 1698
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
1699
				qla2x00_process_completed_request(vha, rsp->req,
L
Linus Torvalds 已提交
1700 1701 1702 1703
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
1704
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
L
Linus Torvalds 已提交
1705
			break;
1706 1707 1708
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
1709
			break;
1710 1711 1712
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
L
Linus Torvalds 已提交
1713 1714
		default:
			/* Type Not Supported. */
1715 1716
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
L
Linus Torvalds 已提交
1717
			    "entry status=%x.\n",
1718
			    pkt->entry_type, pkt->entry_status);
L
Linus Torvalds 已提交
1719 1720 1721 1722 1723 1724 1725
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
1726
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
L
Linus Torvalds 已提交
1727 1728
}

1729
static inline void
1730
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1731
		     uint32_t sense_len, struct rsp_que *rsp, int res)
1732
{
1733
	struct scsi_qla_host *vha = sp->fcport->vha;
1734 1735
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;
1736 1737 1738 1739

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

1740 1741 1742 1743 1744
	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
1745
		sense_len = par_sense_len;
1746 1747 1748

	memcpy(cp->sense_buffer, sense_data, sense_len);

1749 1750 1751 1752 1753
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
1754
		rsp->status_srb = sp;
1755 1756
		cp->result = res;
	}
1757

1758 1759
	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
H
Hannes Reinecke 已提交
1760
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
1761 1762
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
1763 1764
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
1765
	}
1766 1767
}

1768 1769
struct scsi_dif_tuple {
	__be16 guard;       /* Checksum */
1770
	__be16 app_tag;         /* APPL identifier */
1771 1772 1773 1774 1775 1776 1777 1778 1779
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
1780
static inline int
1781 1782
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
1783
	struct scsi_qla_host *vha = sp->fcport->vha;
1784
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1785 1786
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
1787 1788 1789 1790
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1801

1802 1803
	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);
1804

1805 1806
	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1807
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1808
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1809
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1810
	    a_app_tag, e_app_tag, a_guard, e_guard);
1811

1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835
	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
1836
			struct t10_pi_tuple *spt;
1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
1852
				ql_log(ql_log_warn, vha, 0x302f,
1853 1854
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

1869 1870 1871 1872 1873 1874 1875
	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1876
		return 1;
1877 1878
	}

1879 1880
	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
1881
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1882
		    0x10, 0x3);
1883 1884 1885
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1886
		return 1;
1887 1888
	}

1889 1890
	/* check appl tag */
	if (e_app_tag != a_app_tag) {
1891
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1892
		    0x10, 0x2);
1893 1894 1895
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1896
		return 1;
1897
	}
1898

1899
	return 1;
1900 1901
}

1902 1903 1904 1905 1906 1907 1908 1909 1910 1911
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
				  struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
1912
	struct bsg_job *bsg_job = NULL;
1913 1914
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
1915 1916 1917 1918 1919 1920
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
1921
	if (index >= req->num_outstanding_cmds) {
1922 1923 1924 1925 1926 1927 1928
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
1929
	if (!sp) {
1930 1931 1932 1933 1934 1935 1936 1937
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

1938 1939 1940 1941 1942 1943
	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

1944 1945 1946 1947 1948 1949 1950 1951
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

1952
	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1953 1954 1955
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
1956
			bsg_reply->reply_payload_rcv_len =
1957
					bsg_job->reply_payload.payload_len;
1958
			vha->qla_stats.input_bytes +=
1959
				bsg_reply->reply_payload_rcv_len;
1960
			vha->qla_stats.input_requests++;
1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with date overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with date underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
2040
	bsg_reply->reply_payload_rcv_len = 0;
2041 2042 2043

done:
	/* Return the vendor specific reply to API */
2044
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2045 2046 2047 2048 2049 2050 2051
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(vha, sp, (DID_OK << 6));

}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: Entry pointer
 *
 * Decodes the firmware completion status and SCSI status of a finished
 * FCP command, maps it to a Linux midlayer result (DID_* << 16 | SCSI
 * status) and completes the srb unless sense-data continuation entries
 * are still pending (rsp->status_srb set by qla2x00_handle_sense()).
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;

	/* The same IOCB is laid out differently on FWI2 (24xx+) chips. */
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	/* Handle encodes queue number (MSW) and command slot (LSW). */
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		/* Out-of-range handle: firmware state is suspect, schedule
		 * an ISP abort unless one is already in progress. */
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	/* Bidirectional (BSG) commands take a separate completion path. */
	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
		/* Valid values of the retry delay timer are 0x1-0xffef */
		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
			retry_delay = sts24->retry_delay;
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			/* F/W and target residuals disagreeing indicates
			 * dropped frames on the wire. */
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		/* T10-PI guard/ref/app tag mismatch; handler sets cp->result
		 * and decides whether the failure is worth logging. */
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	/* Defer completion while sense-data continuation entries remain. */
	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue holding the pending srb (rsp->status_srb)
 * @pkt: Entry pointer
 *
 * Extended sense data: copies another chunk of sense bytes into the
 * command's sense buffer and completes the command once all sense
 * data has been transferred.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	/* Nothing pending from a prior status entry. */
	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	/* Copy at most one continuation entry's worth of sense data. */
	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	/* Remember progress for the next continuation entry. */
	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: Entry pointer
 *
 * Completes the associated srb with DID_ERROR (or DID_BUS_BUSY for
 * RF_BUSY entries); logs a warning when the handle/queue is invalid.
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);	/* queue number in handle MSW */
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 *
 * Copies the mailbox output registers requested by the pending mailbox
 * command (ha->mcp->in_mb bitmap) into ha->mailbox_out[].
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	/* mb0 came from the host-status register; start at mailbox1. */
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/*
 * qla24xx_abort_iocb_entry() - Process an abort IOCB completion and
 * complete the waiting abort srb.
 */
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	/* F/W returns the abort completion status in the nport_handle word.
	 * NOTE(review): le32_to_cpu on what other call sites treat as a
	 * 16-bit field looks suspicious — confirm width of nport_handle in
	 * struct abort_entry_24xx (le16_to_cpu may be intended). */
	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
	sp->done(vha, sp, 0);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue to drain
 *
 * Walks the response ring until an unprocessed-signature entry is found,
 * dispatching each IOCB by entry type, then updates the out-pointer
 * register. Caller holds the hardware lock.
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/* if kernel does not notify qla of IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		/* Advance (and wrap) the ring pointer before processing. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			/* Target mode may still want this errored entry. */
			if (qlt_24xx_process_response_error(vha, pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
		case CTIO_CRC2:
			/* Target-mode IOCBs; fan out to all vports. */
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

/*
 * qla2xxx_check_risc_status() - Diagnostic poke at the RISC iobase
 * window after a RISC-paused condition; logs "Additional code -- 0x55AA"
 * if the check bit is set. Only applicable to 25xx/81xx/83xx/27xx parts.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);	/* flush the write */
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	/* Poll window bit 0 up to 10000 times, re-arming each iteration. */
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First window select timed out; retry with window 0x0003. */
	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);	/* flush the write */
}

2721
/**
2722
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2723 2724 2725 2726 2727 2728 2729 2730
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
2731
qla24xx_intr_handler(int irq, void *dev_id)
2732
{
2733 2734
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
2735 2736 2737 2738 2739
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
2740
	uint16_t	mb[8];
2741
	struct rsp_que *rsp;
2742
	unsigned long	flags;
2743

2744 2745
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
2746 2747
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
2748 2749 2750
		return IRQ_NONE;
	}

2751
	ha = rsp->hw;
2752 2753 2754
	reg = &ha->iobase->isp24;
	status = 0;

2755 2756 2757
	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

2758
	spin_lock_irqsave(&ha->hardware_lock, flags);
2759
	vha = pci_get_drvdata(ha->pdev);
2760 2761
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
2762
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
2763
			break;
2764
		if (stat & HSRX_RISC_PAUSED) {
2765
			if (unlikely(pci_channel_offline(ha->pdev)))
2766 2767
				break;

2768 2769
			hccr = RD_REG_DWORD(&reg->hccr);

2770 2771 2772
			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);
2773

2774
			qla2xxx_check_risc_status(vha);
2775

2776 2777
			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2778 2779 2780 2781 2782
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
2783 2784 2785 2786
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
2787
			qla24xx_mbx_completion(vha, MSW(stat));
2788 2789 2790
			status |= MBX_INTERRUPT;

			break;
2791
		case INTR_ASYNC_EVENT:
2792 2793 2794 2795
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
2796
			qla2x00_async_event(vha, rsp, mb);
2797
			break;
2798 2799
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
2800
			qla24xx_process_response_queue(vha, rsp);
2801
			break;
2802 2803 2804 2805 2806
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2807
			break;
2808 2809 2810 2811 2812 2813 2814
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

2815 2816
			qla24xx_process_response_queue(vha, rsp);
			break;
2817
		}
2818
		default:
2819 2820
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
2821 2822 2823 2824
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
2825 2826
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
2827
	}
2828
	qla2x00_handle_mbx_completion(ha, status);
2829
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2830 2831 2832 2833

	return IRQ_HANDLED;
}

/*
 * qla24xx_msix_rsp_q() - MSI-X handler for a response-queue vector:
 * drains the queue and performs the interrupt handshake unless disabled.
 */
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;
	uint32_t stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	/*
	 * Use host_status register to check to PCI disconnection before we
	 * we process the response queue.
	 */
	stat = RD_REG_DWORD(&reg->host_status);
	if (qla2x00_check_reg32_for_disconnect(vha, stat))
		goto out;
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		/* Ack the interrupt; read back to flush the posted write. */
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
out:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/*
 * qla24xx_msix_default() - MSI-X handler for the default (vector 0) interrupt.
 * @irq: vector number (unused beyond the standard irq_handler_t signature)
 * @dev_id: struct rsp_que * registered with request_irq()
 *
 * Handles mailbox completions, async events, response-queue updates and
 * (in target mode) ATIO-queue updates.  If the RISC is paused, dumps
 * firmware state and schedules an ISP abort instead.
 */
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* do/while(0): single pass, with 'break' as a structured early exit. */
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			/* Capture firmware state and request a full ISP abort. */
			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		/* Low byte of host_status encodes the interrupt type. */
		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			/* mb[0] rides in host_status; fetch the rest from HW. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			/* Target mode: ATIO queue has its own lock. */
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			/* Combined ATIO + response queue update. */
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	/* Wake any mailbox-command waiter if MBX_INTERRUPT was seen. */
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (unlikely(!ha->flags.disable_msix_handshake)) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

2999 3000 3001 3002
/* Interrupt handling helpers. */

/* Name/handler pair used when requesting an MSI-X vector. */
struct qla_init_msix_entry {
	const char *name;	/* label passed to request_irq() */
	irq_handler_t handler;	/* ISR bound to the vector */
};

3006
/*
 * MSI-X vector table for FWI2-capable ISPs.  Index order matters: it is
 * addressed via QLA_MSIX_RSP_Q, QLA_ATIO_VECTOR and the qpair vector type
 * in qla24xx_enable_msix()/qla25xx_request_irq().
 */
static struct qla_init_msix_entry msix_entries[] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
	{ "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};

3013
/* MSI-X vector table used instead of msix_entries[] on P3P (ISP82xx) parts. */
static struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

3018
/*
 * qla24xx_enable_msix() - Allocate and wire up the adapter's MSI-X vectors.
 * @ha: HA context
 * @rsp: base (queue 0) response queue
 *
 * Allocates between MIN_MSIX_COUNT and ha->msix_count vectors, scales the
 * request/response/qpair queue counts down if fewer vectors were granted,
 * requests the base vectors (and the ATIO vector in target mode), registers
 * CPU-affinity notifiers, and decides whether multiqueue (ha->mqenable) can
 * be used.
 *
 * Returns 0 on success or a negative errno; on registration failure all
 * IRQs are freed via qla2x00_free_irqs().
 */
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
				    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving   up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		/* Granted fewer vectors than asked for: shrink our usage. */
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		     "with %d vectors, using %d vectors.\n",
		    ha->msix_count, ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && ql2xmqsupport) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	/* Initialize bookkeeping for every allocated vector. */
	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
		qentry->irq_notify.notify  = qla_irq_affinity_notify;
		qentry->irq_notify.release = qla_irq_affinity_release;
		qentry->cpuid = -1;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "%s", msix_entries[i].name);
		/* P3P (82xx) parts use their own handler table. */
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;

		/* Register for CPU affinity notification. */
		irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);

		/* Schedule work (ie. trigger a notification) to read cpu
		 * mask for this specific irq.
		 * kref_get is required because
		 * irq_affinity_notify() will do
		 * kref_put().
		 */
		kref_get(&qentry->irq_notify.kref);
		schedule_work(&qentry->irq_notify.work);
	}

	/*
	 * If target mode is enable, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "%s", msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
			msix_entries[QLA_ATIO_VECTOR].handler,
			0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
		qentry->have_irq = 1;
	}

	/* Fallthrough target: 'ret' may also hold the ATIO request result. */
msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;
}

/*
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter.
 * @ha: HA context
 * @rsp: base response queue, passed as dev_id to the handler
 *
 * Tries MSI-X first (on capable, non-blacklisted hardware), falls back to
 * single-vector MSI, and finally to legacy INTx (except on ISP82xx, which
 * cannot use INTx).  On pre-FWI2 parts the RISC semaphore is cleared after
 * a successful registration.
 *
 * Returns 0 on success, QLA_FUNCTION_FAILED or a negative errno otherwise.
 */
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	/* HP-branded subsystems known to misbehave with MSI-X. */
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		(ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
			ha->pdev->subsystem_vendor,
			ha->pdev->subsystem_device);
		goto skip_msi;
	}

	/* Early ISP2432 silicon lacks working MSI-X; try MSI instead. */
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back-to MSI mode -%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back-to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	/* Legacy line must be shared; MSI gets an exclusive vector. */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	/* Pre-FWI2 parts: release the RISC semaphore. */
	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

/*
 * qla2x00_free_irqs() - Release all IRQs and vectors owned by the adapter.
 * @vha: SCSI driver HA context
 *
 * Mirror of qla2x00_request_irqs(): unregisters affinity notifiers, frees
 * each requested MSI-X vector (or the single MSI/INTx line) and releases
 * the PCI vector allocation.  Safe to call from probe-failure paths.
 */
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				/* Drop notifier before freeing the vector. */
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
			"Disabled MSI-X.\n");
	} else {
		/* MSI or INTx: a single line registered against rsp. */
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

	pci_free_irq_vectors(ha->pdev);
}
3283

3284 3285
/*
 * qla25xx_request_irq() - Request an MSI-X vector for a queue pair.
 * @ha: HA context
 * @qpair: queue pair the vector will service (passed as dev_id)
 * @msix: pre-allocated MSI-X entry holding the vector number
 * @vector_type: index into msix_entries[] selecting the handler
 *
 * Returns 0 on success or the errno from request_irq().
 */
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	/* Unique per-host, per-qpair name for /proc/interrupts. */
	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}
3304 3305 3306 3307 3308 3309 3310 3311 3312 3313


/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
	const cpumask_t *mask)
{
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct qla_hw_data *ha;
	struct scsi_qla_host *base_vha;
	/* NOTE(review): e->handle is treated as a rsp_que here; for qpair
	 * vectors the handle is a qla_qpair — confirm this notifier is only
	 * attached to rsp-backed vectors. */
	struct rsp_que *rsp = e->handle;

	/* user is recommended to set mask to just 1 cpu */
	e->cpuid = cpumask_first(mask);

	ha = rsp->hw;
	base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld : vector %d cpu %d \n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);

	if (e->have_irq) {
		/* Track the rsp-queue vector's CPU for target-mode use. */
		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
		    (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
			ha->tgt.rspq_vector_cpuid = e->cpuid;
			ql_dbg(ql_dbg_init, base_vha, 0xffff,
			    "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
			    __func__, base_vha->host_no, e->vector, e->cpuid);
		}
	}
}

3337
/*
 * Final kref release for an MSI-X entry's affinity notifier: only logs,
 * since the entry itself is owned and freed by qla2x00_free_irqs().
 */
static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	/* NOTE(review): assumes e->handle is a rsp_que (see notify above) —
	 * confirm for qpair vectors where handle is a qla_qpair. */
	struct rsp_que *rsp = e->handle;
	struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
		"%s: host%ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);
}