// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
7
#include "qla_target.h"
8
#include "qla_gbl.h"
L
Linus Torvalds 已提交
9

10
#include <linux/delay.h>
11
#include <linux/slab.h>
12
#include <linux/cpu.h>
13
#include <linux/t10-pi.h>
14
#include <scsi/scsi_tcq.h>
15
#include <scsi/scsi_bsg_fc.h>
16
#include <scsi/scsi_eh.h>
17 18
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
19

L
Linus Torvalds 已提交
20
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24
	sts_entry_t *);
25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);

/* Deliver a queued FPIN (Fabric Performance Impact Notification) purex
 * item to the FC transport layer via fc_host_fpin_rcv().
 */
static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	uint16_t fpin_len = item->size;
	void *fpin_pkt = &item->iocb;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	       "%s: Enter\n", __func__);

	/* Trace the raw ELS request before handing it up the stack. */
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	       "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
		       fpin_pkt, fpin_len);

	fc_host_fpin_rcv(vha->host, fpin_len, (char *)fpin_pkt);
}
50

51 52 53 54 55 56 57 58
/* Human-readable names for port states, indexed by port-state value. */
const char *const port_state_str[] = {
	"Unknown",
	"UNCONFIGURED",
	"DEAD",
	"LOST",
	"ONLINE"
};

59
static void
60
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
61
{
62 63
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed allocate dma buffer ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
93
	rsp_els->nport_handle = cpu_to_le16(~0);
94
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/* include flipping bit23 in fctl */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028ea,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314
/**
 * __qla_consume_iocb - this routine is used to tell fw driver has processed
 *   or consumed the head IOCB along with the continuation IOCB's from the
 *   provided respond queue.
 * @vha: host adapter pointer
 * @pkt: pointer to current packet.  On return, this pointer shall move
 *       to the next packet.
 * @rsp: respond queue pointer.
 *
 * it is assumed pkt is the head iocb, not the continuation iocbk
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct purex_entry_24xx *head = *pkt;
	struct rsp_que *q = *rsp;
	uint16_t remaining = head->entry_count;
	response_t *entry;
	uint16_t i;

	for (i = 0; i < remaining; i++) {
		entry = q->ring_ptr;
		*pkt = entry;

		/* Advance the response ring, wrapping at the end. */
		q->ring_index++;
		if (q->ring_index == q->length) {
			q->ring_index = 0;
			q->ring_ptr = q->ring;
		} else {
			q->ring_ptr++;
		}

		entry->signature = RESPONSE_PROCESSED;
		/* flush signature */
		wmb();
	}
}

/**
 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
 *    and save to provided buffer
 * @vha: host adapter pointer
 * @pkt: pointer Purex IOCB
 * @rsp: respond queue
 * @buf: extracted ELS payload copy here
 * @buf_len: buffer length
 *
 * Copies the head IOCB's payload and any STATUS_CONT continuation IOCBs
 * into @buf, marking each ring entry processed, then byte-swaps the
 * result in place.  Returns 0 on success, -EIO on undersized buffer,
 * unexpected IOCB type, or payload underrun.
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	/* frame_size低12位 -> payload bytes after the Purex ELS header. */
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
		- PURX_ELS_HEADER_SIZE;

	/*
	 * end of payload may not end in 4bytes boundary.  Need to
	 * round up / pad for room to swap, before saving data
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		    "%s buffer is too small %d < %d\n",
		    __func__, buf_len, tpad);
		/* Still consume the IOCBs so the ring pointer stays sane. */
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	/* Head IOCB carries at most sizeof(els_frame_payload) bytes. */
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload))  ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	/* flush signature */
	wmb();

	do {
		/* Drain continuation IOCBs until the payload is complete. */
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				/* Clamp: never write past total_bytes. */
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more that we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			/* flush signature */
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	/* Payload arrives big-endian; convert to CPU order in place. */
	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}

L
Linus Torvalds 已提交
315 316
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
317
 * @irq: interrupt number
L
Linus Torvalds 已提交
318 319 320 321 322 323 324
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
325
qla2100_intr_handler(int irq, void *dev_id)
L
Linus Torvalds 已提交
326
{
327 328
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
329
	struct device_reg_2xxx __iomem *reg;
L
Linus Torvalds 已提交
330 331
	int		status;
	unsigned long	iter;
332
	uint16_t	hccr;
333
	uint16_t	mb[8];
334
	struct rsp_que *rsp;
335
	unsigned long	flags;
L
Linus Torvalds 已提交
336

337 338
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
339 340
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
L
Linus Torvalds 已提交
341 342 343
		return (IRQ_NONE);
	}

344
	ha = rsp->hw;
345
	reg = &ha->iobase->isp;
L
Linus Torvalds 已提交
346 347
	status = 0;

348
	spin_lock_irqsave(&ha->hardware_lock, flags);
349
	vha = pci_get_drvdata(ha->pdev);
L
Linus Torvalds 已提交
350
	for (iter = 50; iter--; ) {
351
		hccr = rd_reg_word(&reg->hccr);
352
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
353
			break;
354 355 356 357 358 359
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
360
			 * bit to be cleared.  Schedule a big hammer to get
361 362
			 * out of the RISC PAUSED state.
			 */
363 364
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);
365

366
			ha->isp_ops->fw_dump(vha);
367
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368
			break;
369
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
L
Linus Torvalds 已提交
370 371
			break;

372 373 374
		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
L
Linus Torvalds 已提交
375 376

			/* Get mailbox data. */
377 378
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
379
				qla2x00_mbx_completion(vha, mb[0]);
L
Linus Torvalds 已提交
380
				status |= MBX_INTERRUPT;
381 382 383 384
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
385
				qla2x00_async_event(vha, rsp, mb);
L
Linus Torvalds 已提交
386 387
			} else {
				/*EMPTY*/
388 389 390
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
L
Linus Torvalds 已提交
391 392
			}
			/* Release mailbox registers. */
393 394
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
L
Linus Torvalds 已提交
395
		} else {
396
			qla2x00_process_response_queue(rsp);
L
Linus Torvalds 已提交
397

398 399
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
L
Linus Torvalds 已提交
400 401
		}
	}
402
	qla2x00_handle_mbx_completion(ha, status);
403
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
L
Linus Torvalds 已提交
404 405 406 407

	return (IRQ_HANDLED);
}

408
bool
409
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
410 411
{
	/* Check for PCI disconnection */
412
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
413
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
414 415
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
416
			qla_schedule_eeh_work(vha);
417
		}
418 419 420 421 422
		return true;
	} else
		return false;
}

423 424 425 426 427 428
/* 16-bit variant: widen with all-ones upper half so an all-ones 16-bit
 * read maps onto the 32-bit 0xffffffff disconnect sentinel.
 */
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	uint32_t wide_reg = 0xffff0000 | reg;

	return qla2x00_check_reg32_for_disconnect(vha, wide_reg);
}

L
Linus Torvalds 已提交
429 430
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
431
 * @irq: interrupt number
L
Linus Torvalds 已提交
432 433 434 435 436 437 438
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
439
qla2300_intr_handler(int irq, void *dev_id)
L
Linus Torvalds 已提交
440
{
441
	scsi_qla_host_t	*vha;
442
	struct device_reg_2xxx __iomem *reg;
L
Linus Torvalds 已提交
443 444 445 446
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
447
	uint16_t	mb[8];
448 449
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
450
	unsigned long	flags;
L
Linus Torvalds 已提交
451

452 453
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
454 455
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
L
Linus Torvalds 已提交
456 457 458
		return (IRQ_NONE);
	}

459
	ha = rsp->hw;
460
	reg = &ha->iobase->isp;
L
Linus Torvalds 已提交
461 462
	status = 0;

463
	spin_lock_irqsave(&ha->hardware_lock, flags);
464
	vha = pci_get_drvdata(ha->pdev);
L
Linus Torvalds 已提交
465
	for (iter = 50; iter--; ) {
466
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
467
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
468
			break;
L
Linus Torvalds 已提交
469
		if (stat & HSR_RISC_PAUSED) {
470
			if (unlikely(pci_channel_offline(ha->pdev)))
471 472
				break;

473
			hccr = rd_reg_word(&reg->hccr);
474

L
Linus Torvalds 已提交
475
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
476 477 478
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
L
Linus Torvalds 已提交
479
			else
480 481 482
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
L
Linus Torvalds 已提交
483 484 485 486

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
487
			 * hammer to get out of the RISC PAUSED state.
L
Linus Torvalds 已提交
488
			 */
489 490
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);
491

492
			ha->isp_ops->fw_dump(vha);
493
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
494 495 496 497 498 499 500 501 502
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
503
			qla2x00_mbx_completion(vha, MSW(stat));
L
Linus Torvalds 已提交
504 505 506
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
507
			wrt_reg_word(&reg->semaphore, 0);
L
Linus Torvalds 已提交
508 509
			break;
		case 0x12:
510 511 512 513
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
514
			qla2x00_async_event(vha, rsp, mb);
515 516
			break;
		case 0x13:
517
			qla2x00_process_response_queue(rsp);
L
Linus Torvalds 已提交
518 519
			break;
		case 0x15:
520 521
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
522
			qla2x00_async_event(vha, rsp, mb);
L
Linus Torvalds 已提交
523 524
			break;
		case 0x16:
525 526 527
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
528
			qla2x00_async_event(vha, rsp, mb);
L
Linus Torvalds 已提交
529 530
			break;
		default:
531 532
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
L
Linus Torvalds 已提交
533 534
			break;
		}
535 536
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
L
Linus Torvalds 已提交
537
	}
538
	qla2x00_handle_mbx_completion(ha, status);
539
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
L
Linus Torvalds 已提交
540 541 542 543 544 545

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
546
 * @vha: SCSI driver HA context
L
Linus Torvalds 已提交
547 548 549
 * @mb0: Mailbox0 register
 */
static void
550
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
L
Linus Torvalds 已提交
551 552
{
	uint16_t	cnt;
553
	uint32_t	mboxes;
554
	__le16 __iomem *wptr;
555
	struct qla_hw_data *ha = vha->hw;
556
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
L
Linus Torvalds 已提交
557

558
	/* Read all mbox registers? */
559 560
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
561
	if (!ha->mcp)
562
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
563 564 565
	else
		mboxes = ha->mcp->in_mb;

L
Linus Torvalds 已提交
566 567 568
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
569
	mboxes >>= 1;
570
	wptr = MAILBOX_REG(ha, reg, 1);
L
Linus Torvalds 已提交
571 572

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
573
		if (IS_QLA2200(ha) && cnt == 8)
574
			wptr = MAILBOX_REG(ha, reg, 8);
575
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
L
Linus Torvalds 已提交
576
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
577
		else if (mboxes & BIT_0)
578
			ha->mailbox_out[cnt] = rd_reg_word(wptr);
579

L
Linus Torvalds 已提交
580
		wptr++;
581
		mboxes >>= 1;
L
Linus Torvalds 已提交
582 583 584
	}
}

585 586 587 588 589 590 591
/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication (IDC) AEN.
 * @vha: SCSI driver HA context
 * @aen: IDC async event code (MBA_IDC_COMPLETE/NOTIFY/TIME_EXT)
 * @descr: event descriptor (encodes the ACK timeout for NOTIFY)
 *
 * Captures mailbox1..7 for the event and dispatches: completes a pending
 * DCBX wait, posts an IDC ACK work item, or records a timeout extension.
 */
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

646
#define LS_UNKNOWN	2
/*
 * qla2x00_get_link_speed_str() - Map a firmware link-speed code to a
 * human-readable Gb/s string.
 * @ha: HW adapter data (used to special-case 1Gb-only ISP21xx/22xx)
 * @speed: firmware speed code (index into link_speeds; 0x13 means 10Gb)
 *
 * Returns a pointer to a static string; "?" for unknown codes.
 */
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "64", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		/* 0x13 is the firmware's encoding for 10 Gb/s. */
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

665
static void
666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
				IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 *  - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 *  - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
722
				    "Not a fatal error, f/w has recovered itself.\n");
723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
742
			uint16_t sfp_additional_info, sfp_multirate;
743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 *  - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	  SFP Status 0x0 = SFP+ transceiver not expected
			 *	  SFP Status 0x1 = SFP+ transceiver not present
			 *	  SFP Status 0x2 = SFP+ transceiver invalid
			 *	  SFP Status 0x3 = SFP+ transceiver present and
			 *	  valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	  SFP info 0x0 = Unregocnized transceiver for
			 *	  Ethernet
			 *	  SFP info 0x1 = SFP+ brand validation failed
			 *	  SFP info 0x2 = SFP+ speed validation failed
			 *	  SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	  DCBX Status 0x0 = DCBX Disabled
			 *	  DCBX Status 0x1 = DCBX Enabled
			 *	  DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
782
			sfp_additional_info = (mb[6] & 0x0003);
783 784 785 786 787 788 789 790 791 792 793 794 795 796
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_statis=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
797
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
798
			    htbt_counter, htbt_monitor_enable,
799
			    sfp_additional_info, sfp_multirate);
800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_state=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
820 821
		if (ha->flags.nic_core_reset_owner)
			return;
822 823 824 825
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850
/* Return 1 if @rscn_entry matches the 24-bit D_ID of any virtual port on
 * this adapter, 0 otherwise.  Walks hw->vp_list under vport_slock.
 */
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags;
	int found = 0;

	/* No virtual hosts configured: the DID cannot belong to a vport. */
	if (!ha->num_vhosts)
		return 0;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->d_id.b24 == rscn_entry) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return found;
}

851
fc_port_t *
852 853
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
854 855 856 857 858 859 860 861
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}
862

863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
/* Look up an fcport by its WWPN.  When @incl_deleted is false, ports
 * marked deleted are skipped.  Returns NULL if no match.
 */
fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *fcport, *tmp;

	fcport = tmp = NULL;
	list_for_each_entry_safe(fcport, tmp, &vha->vp_fcports, list) {
		if (memcmp(fcport->port_name, wwpn, WWN_SIZE) != 0)
			continue;
		if (incl_deleted)
			return fcport;
		if (fcport->deleted == 0)
			return fcport;
	}
	return NULL;
}
879

880 881 882 883 884 885 886 887 888 889 890 891 892 893 894
/* Look up an fcport by its 24-bit N_Port ID.  When @incl_deleted is
 * false, ports marked deleted are skipped.  Returns NULL if no match.
 */
fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

898 899 900 901 902
/* Shall be called only on supported adapters. */
/*
 * qla27xx_handle_8200_aen() - Handle an MPI heartbeat-stop AEN.
 * @vha: SCSI driver HA context
 * @mb: AEN mailbox registers; mb[1] BIT_8 indicates MPI reset is needed
 *
 * Always captures an MPI firmware dump; optionally (module parameter
 * ql2xfulldump_on_mpifail) takes a full firmware dump and schedules an
 * ISP abort to recover.
 */
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	       "MPI Heartbeat stop. MPI reset is%s needed. "
	       "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	       mb[1] & BIT_8 ? "" : " not",
	       mb[0], mb[1], mb[2], mb[3]);

	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	       "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		/* Full dump taken: force re-init via ISP abort in the DPC. */
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

931
static struct purex_item *
932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		       ">> Failed allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}

/*
 * Append a purex item to the host's purex list under the list lock,
 * record its processing callback, and flag the DPC thread to run it.
 */
static void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
			 void (*process_item)(struct scsi_qla_host *vha,
					      struct purex_item *pkt))
{
	struct purex_list *plist = &vha->purex_list;
	ulong iflags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&plist->lock, iflags);
	list_add_tail(&pkt->list, &plist->head);
	spin_unlock_irqrestore(&plist->lock, iflags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}

/**
 * qla24xx_copy_std_pkt() - Copy over purex ELS which is
 * contained in a single IOCB.
 * purex packet.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
985
static struct purex_item
986 987 988 989 990 991 992 993 994 995 996 997 998
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
					QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}

999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018
/**
 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
		      struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

1019
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103
	    - PURX_ELS_HEADER_SIZE;
	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload))  ?
		   sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	       "FPIN ELS, frame_size 0x%x, entry count %d\n",
	       total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				       "Ran out of IOCBs, partial data 0x%x\n",
				       buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				       "Unexpected IOCB type, partial data 0x%x\n",
				       buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				       "Attempt to copy more that we got, optimizing..%x\n",
				       buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			       "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			       total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);
	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}

L
Linus Torvalds 已提交
1104 1105
/**
 * qla2x00_async_event() - Process aynchronous events.
1106 1107
 * @vha: SCSI driver HA context
 * @rsp: response queue
1108
 * @mb: Mailbox registers (0 - 3)
L
Linus Torvalds 已提交
1109
 */
1110
void
1111
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
L
Linus Torvalds 已提交
1112 1113
{
	uint16_t	handle_cnt;
1114
	uint16_t	cnt, mbx;
L
Linus Torvalds 已提交
1115
	uint32_t	handles[5];
1116
	struct qla_hw_data *ha = vha->hw;
1117
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1118
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1119
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1120
	uint32_t	rscn_entry, host_pid;
1121
	unsigned long	flags;
1122
	fc_port_t	*fcport = NULL;
L
Linus Torvalds 已提交
1123

1124 1125 1126
	if (!vha->hw->flags.fw_started)
		return;

L
Linus Torvalds 已提交
1127 1128
	/* Setup to process RIO completion. */
	handle_cnt = 0;
1129
	if (IS_CNA_CAPABLE(ha))
1130
		goto skip_rio;
L
Linus Torvalds 已提交
1131 1132
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
1133
		handles[0] = make_handle(mb[2], mb[1]);
L
Linus Torvalds 已提交
1134 1135 1136
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
1137
		handles[0] = mb[1];
L
Linus Torvalds 已提交
1138 1139 1140 1141
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
1142 1143
		handles[0] = mb[1];
		handles[1] = mb[2];
L
Linus Torvalds 已提交
1144 1145 1146 1147
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
1148 1149 1150
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
1151 1152 1153 1154
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
1155 1156 1157
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
1158 1159 1160 1161 1162
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
1163 1164 1165
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
L
Linus Torvalds 已提交
1166 1167 1168 1169 1170 1171
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
1172 1173 1174
		handles[0] = make_handle(mb[2], mb[1]);
		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
					 RD_MAILBOX_REG(ha, reg, 6));
L
Linus Torvalds 已提交
1175 1176 1177 1178 1179 1180
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
1181
skip_rio:
L
Linus Torvalds 已提交
1182 1183
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
1184
		if (!vha->flags.online)
L
Linus Torvalds 已提交
1185 1186 1187
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
1188 1189
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
L
Linus Torvalds 已提交
1190 1191 1192
		break;

	case MBA_RESET:			/* Reset */
1193 1194
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");
L
Linus Torvalds 已提交
1195

1196
		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1197 1198 1199
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
1200
		mbx = 0;
1201 1202 1203

		vha->hw_err_cnt++;

1204 1205 1206 1207
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

1208 1209 1210 1211
			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1212 1213 1214 1215 1216 1217 1218 1219 1220

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
			    mb[1], mb[2], mb[3]);

1221
		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1222
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
1223
			ha->isp_ops->mpi_fw_dump(vha, 1);
1224
		ha->isp_ops->fw_dump(vha);
1225
		ha->flags.fw_init_done = 0;
1226
		QLA_FW_STOPPED(ha);
L
Linus Torvalds 已提交
1227

1228
		if (IS_FWI2_CAPABLE(ha)) {
1229
			if (mb[1] == 0 && mb[2] == 0) {
1230
				ql_log(ql_log_fatal, vha, 0x5004,
1231 1232
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
1233
				vha->flags.online = 0;
1234
				vha->device_flags |= DFLG_DEV_FAILED;
1235
			} else {
L
Lucas De Marchi 已提交
1236
				/* Check to see if MPI timeout occurred */
1237
				if ((mbx & MBX_3) && (ha->port_no == 0))
1238 1239 1240
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

1241
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1242
			}
1243
		} else if (mb[1] == 0) {
1244
			ql_log(ql_log_fatal, vha, 0x5005,
L
Linus Torvalds 已提交
1245 1246
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
1247
			vha->flags.online = 0;
1248
			vha->device_flags |= DFLG_DEV_FAILED;
L
Linus Torvalds 已提交
1249
		} else
1250
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1251 1252 1253
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
1254 1255
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n",  mb[1]);
L
Linus Torvalds 已提交
1256

1257 1258
		vha->hw_err_cnt++;

1259
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1260 1261 1262
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
1263
		ql_log(ql_log_warn, vha, 0x5007,
1264
		    "ISP Response Transfer Error (%x).\n", mb[1]);
L
Linus Torvalds 已提交
1265

1266 1267
		vha->hw_err_cnt++;

1268
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1269 1270 1271
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
1272
		ql_dbg(ql_dbg_async, vha, 0x5008,
1273 1274
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;
L
Linus Torvalds 已提交
1275

1276
	case MBA_LOOP_INIT_ERR:
1277
		ql_log(ql_log_warn, vha, 0x5090,
1278 1279
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1280
		break;
1281

L
Linus Torvalds 已提交
1282
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
1283 1284
		ha->flags.lip_ae = 1;

1285
		ql_dbg(ql_dbg_async, vha, 0x5009,
1286
		    "LIP occurred (%x).\n", mb[1]);
L
Linus Torvalds 已提交
1287

1288 1289 1290
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1291
			qla2x00_mark_all_devices_lost(vha);
L
Linus Torvalds 已提交
1292 1293
		}

1294 1295 1296
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1297 1298
		}

1299 1300
		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1301

1302 1303
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
L
Linus Torvalds 已提交
1304 1305 1306
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
1307
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
1308
			ha->link_data_rate = PORT_SPEED_1GB;
1309
		else
L
Linus Torvalds 已提交
1310 1311
			ha->link_data_rate = mb[1];

1312
		ql_log(ql_log_info, vha, 0x500a,
1313
		    "LOOP UP detected (%s Gbps).\n",
1314
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
L
Linus Torvalds 已提交
1315

1316 1317 1318 1319 1320 1321
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

1322 1323
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1324

1325 1326 1327 1328 1329
		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

L
Linus Torvalds 已提交
1330 1331 1332
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
1333
		SAVE_TOPO(ha);
1334 1335
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
1336
		vha->link_down_time = 0;
1337

1338
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1339 1340
			? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1341
			: mbx;
1342
		ql_log(ql_log_info, vha, 0x500b,
1343 1344
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);
L
Linus Torvalds 已提交
1345

1346 1347 1348
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1349 1350 1351
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
1352
			 * Restore for Physical Port only
1353
			 */
1354
			if (!vha->vp_idx) {
1355 1356
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
1357
					void *wwpn = ha->init_cb->port_name;
1358

1359 1360 1361 1362
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
1363
					    vha, 0x00d8, "LOOP DOWN detected,"
1364 1365 1366 1367 1368
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1369 1370
			}

1371
			vha->device_flags |= DFLG_NO_CABLE;
1372
			qla2x00_mark_all_devices_lost(vha);
L
Linus Torvalds 已提交
1373 1374
		}

1375 1376 1377
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1378 1379
		}

1380
		vha->flags.management_server_logged_in = 0;
1381
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
1382
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
L
Linus Torvalds 已提交
1383 1384 1385
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
1386
		ql_dbg(ql_dbg_async, vha, 0x500c,
1387
		    "LIP reset occurred (%x).\n", mb[1]);
L
Linus Torvalds 已提交
1388

1389 1390 1391
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1392
			qla2x00_mark_all_devices_lost(vha);
L
Linus Torvalds 已提交
1393 1394
		}

1395 1396 1397
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1398 1399
		}

1400
		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1401 1402

		ha->operating_mode = LOOP;
1403 1404
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
L
Linus Torvalds 已提交
1405 1406
		break;

1407
	/* case MBA_DCBX_COMPLETE: */
L
Linus Torvalds 已提交
1408
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
1409 1410
		ha->flags.lip_ae = 0;

L
Linus Torvalds 已提交
1411 1412 1413
		if (IS_QLA2100(ha))
			break;

1414
		if (IS_CNA_CAPABLE(ha)) {
1415 1416 1417
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
1418
			if (ha->notify_dcbx_comp && !vha->vp_idx)
1419 1420 1421
				complete(&ha->dcbx_comp);

		} else
1422 1423
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");
L
Linus Torvalds 已提交
1424 1425 1426 1427 1428

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
1429 1430 1431 1432
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
L
Linus Torvalds 已提交
1433
				    LOOP_DOWN_TIME);
1434
			if (!N2N_TOPO(ha))
1435
				qla2x00_mark_all_devices_lost(vha);
L
Linus Torvalds 已提交
1436 1437
		}

1438 1439 1440
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1441 1442
		}

1443 1444 1445 1446 1447
		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1448

1449
		vha->flags.management_server_logged_in = 0;
L
Linus Torvalds 已提交
1450 1451 1452 1453 1454 1455
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

1456
		ql_dbg(ql_dbg_async, vha, 0x500f,
L
Linus Torvalds 已提交
1457 1458
		    "Configuration change detected: value=%x.\n", mb[1]);

1459 1460 1461 1462
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
L
Linus Torvalds 已提交
1463
				    LOOP_DOWN_TIME);
1464
			qla2x00_mark_all_devices_lost(vha);
L
Linus Torvalds 已提交
1465 1466
		}

1467 1468 1469
		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1470 1471
		}

1472 1473
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
L
Linus Torvalds 已提交
1474 1475 1476
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
1492 1493 1494 1495
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;
1496

1497
		if (mb[2] == 0x7) {
1498
			ql_dbg(ql_dbg_async, vha, 0x5010,
1499 1500
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
1501
			    mb[1], mb[2], mb[3]);
1502 1503 1504 1505

			if (mb[1] == 0xffff)
				goto global_port_update;

1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522
			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

1523 1524 1525 1526 1527 1528 1529 1530 1531
			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
1532 1533
			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
1534
				qlt_schedule_sess_for_deletion(fcport);
1535
			}
1536 1537 1538
			break;

global_port_update:
1539 1540 1541 1542 1543
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
1544
				qla2x00_mark_all_devices_lost(vha);
1545 1546 1547 1548 1549 1550
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
1551
				qla2x00_mark_all_devices_lost(vha);
1552 1553 1554 1555 1556 1557 1558
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

L
Linus Torvalds 已提交
1559
		/*
1560
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
L
Linus Torvalds 已提交
1561 1562 1563
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
1564
		atomic_set(&vha->loop_down_timer, 0);
1565
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1566
			!ha->flags.n2n_ae  &&
1567
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
1568 1569 1570
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1571 1572 1573
			break;
		}

1574 1575 1576
		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1577 1578 1579 1580

		/*
		 * Mark all devices as missing so we will login again.
		 */
1581
		atomic_set(&vha->loop_state, LOOP_UP);
1582
		vha->scan.scan_retry = 0;
L
Linus Torvalds 已提交
1583

1584 1585
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1586
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
L
Linus Torvalds 已提交
1587 1588 1589
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
1590
		/* Check if the Vport has issued a SCR */
1591
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1592 1593
			break;
		/* Only handle SCNs for our Vport index. */
1594
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1595
			break;
1596

1597 1598 1599
		ql_log(ql_log_warn, vha, 0x5013,
		       "RSCN database changed -- %04x %04x %04x.\n",
		       mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1600

1601
		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1602 1603
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
L
Linus Torvalds 已提交
1604
		if (rscn_entry == host_pid) {
1605 1606 1607
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
L
Linus Torvalds 已提交
1608 1609 1610
			break;
		}

1611 1612
		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
L
Linus Torvalds 已提交
1613

1614 1615 1616 1617
		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

1618 1619
		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
1620 1621
		{
			struct event_arg ea;
L
Linus Torvalds 已提交
1622

1623 1624
			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
1625
			ea.id.b.rsvd_1 = rscn_entry >> 24;
1626
			qla2x00_handle_rscn(vha, &ea);
1627
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1628
		}
L
Linus Torvalds 已提交
1629
		break;
1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			       "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			       "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;
L
Linus Torvalds 已提交
1643 1644
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
1645 1646
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");
L
Linus Torvalds 已提交
1647

1648
		if (IS_FWI2_CAPABLE(ha))
1649
			qla24xx_process_response_queue(vha, rsp);
1650
		else
1651
			qla2x00_process_response_queue(rsp);
L
Linus Torvalds 已提交
1652
		break;
1653 1654

	case MBA_DISCARD_RND_FRAME:
1655 1656 1657
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1658
		vha->interface_err_cnt++;
1659
		break;
1660 1661

	case MBA_TRACE_NOTIFICATION:
1662 1663
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1664
		break;
1665 1666

	case MBA_ISP84XX_ALERT:
1667 1668 1669
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1670 1671 1672 1673

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
1674 1675 1676
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
1677 1678 1679
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1680 1681 1682
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
1683 1684 1685
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1686 1687 1688
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
1689 1690 1691 1692
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
1693 1694 1695
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
1696 1697
			break;
		default:
1698 1699
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1700 1701 1702 1703
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
1704
	case MBA_DCBX_START:
1705 1706 1707
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1708 1709
		break;
	case MBA_DCBX_PARAM_UPDATE:
1710 1711 1712
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1713 1714
		break;
	case MBA_FCF_CONF_ERR:
1715 1716 1717
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
1718 1719
		break;
	case MBA_IDC_NOTIFY:
1720
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1721
			mb[4] = rd_reg_word(&reg24->mailbox4);
1722 1723 1724
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1725
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1726 1727 1728 1729 1730 1731
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
1732 1733
				qla2xxx_wake_dpc(vha);
			}
1734
		}
1735
		fallthrough;
1736
	case MBA_IDC_COMPLETE:
1737
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
1738
			complete(&ha->lb_portup_comp);
1739
		fallthrough;
1740
	case MBA_IDC_TIME_EXT:
1741 1742
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
1743 1744 1745 1746
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
1747
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1748
			vha->hw_err_cnt++;
1749
			qla27xx_handle_8200_aen(vha, mb);
1750
		} else if (IS_QLA83XX(ha)) {
1751 1752 1753 1754
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
1755 1756 1757 1758 1759 1760
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
1761
		break;
1762

1763 1764
	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
1765 1766
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
1767
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;
				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u \n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
1793 1794
		break;

1795 1796 1797 1798 1799
	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		break;

1800 1801 1802
	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
1803 1804 1805 1806 1807
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;

	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1808 1809
		break;

1810 1811 1812 1813
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
L
Linus Torvalds 已提交
1814
	}
1815

1816 1817
	qlt_async_event(mb[0], vha, mb);

1818
	if (!vha->vp_idx && ha->num_vhosts)
1819
		qla2x00_alert_all_vps(rsp, mb);
L
Linus Torvalds 已提交
1820 1821 1822 1823
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
1824 1825
 * @vha: SCSI driver HA context
 * @req: request queue
L
Linus Torvalds 已提交
1826 1827
 * @index: SRB index
 */
1828
void
1829
qla2x00_process_completed_request(struct scsi_qla_host *vha,
1830
				  struct req_que *req, uint32_t index)
L
Linus Torvalds 已提交
1831 1832
{
	srb_t *sp;
1833
	struct qla_hw_data *ha = vha->hw;
L
Linus Torvalds 已提交
1834 1835

	/* Validate handle. */
1836
	if (index >= req->num_outstanding_cmds) {
1837 1838
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);
L
Linus Torvalds 已提交
1839

1840
		if (IS_P3P_TYPE(ha))
1841 1842 1843
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1844 1845 1846
		return;
	}

1847
	sp = req->outstanding_cmds[index];
L
Linus Torvalds 已提交
1848 1849
	if (sp) {
		/* Free outstanding command slot. */
1850
		req->outstanding_cmds[index] = NULL;
L
Linus Torvalds 已提交
1851 1852

		/* Save ISP completion status */
1853
		sp->done(sp, DID_OK << 16);
L
Linus Torvalds 已提交
1854
	} else {
1855
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
L
Linus Torvalds 已提交
1856

1857
		if (IS_P3P_TYPE(ha))
1858 1859 1860
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
L
Linus Torvalds 已提交
1861 1862 1863
	}
}

1864
srb_t *
1865 1866 1867 1868 1869
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
1870
	srb_t *sp;
1871 1872
	uint16_t index;

1873 1874 1875
	if (pkt->handle == QLA_SKIP_HANDLE)
		return NULL;

1876
	index = LSW(pkt->handle);
1877
	if (index >= req->num_outstanding_cmds) {
1878
		ql_log(ql_log_warn, vha, 0x5031,
1879 1880
			   "%s: Invalid command index (%x) type %8ph.\n",
			   func, index, iocb);
1881
		if (IS_P3P_TYPE(ha))
1882 1883 1884
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1885
		return NULL;
1886 1887 1888
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
1889
		ql_log(ql_log_warn, vha, 0x5032,
1890 1891
			"%s: Invalid completion handle (%x) -- timed-out.\n",
			func, index);
1892
		return NULL;
1893 1894
	}
	if (sp->handle != index) {
1895
		ql_log(ql_log_warn, vha, 0x5033,
1896 1897
			"%s: SRB handle (%x) mismatch %x.\n", func,
			sp->handle, index);
1898 1899
		return NULL;
	}
1900

1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912
	req->outstanding_cmds[index] = NULL;
	return sp;
}

/*
 * Process a mailbox IOCB completion (2xxx-series login/logout path).
 *
 * Decodes the MBX entry status/mailbox registers into the SRB's logio
 * data words (data[0] = MBS_* result, data[1] = retry flag / extra info)
 * and completes the SRB.  sp->done() is always called exactly once.
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Pessimistic default; overwritten on success below. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	/*
	 * NOTE(review): status 0x30 with mb0 == MBS_COMMAND_COMPLETE is
	 * treated as success for login commands — presumably a firmware
	 * quirk; confirm against firmware spec.
	 */
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			/* mb1 bits describe the remote port's role. */
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	/* Failure: pass the raw MBS code (normalized) back to the waiter. */
	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

1991 1992 1993 1994 1995
/*
 * Process a 24xx mailbox IOCB completion.
 *
 * Sanity-checks that the completing SRB is really a mailbox-style
 * command (SCSI/NVMe/TM completions arriving here indicate firmware
 * confusion and trigger an ISP abort), copies the returned mailbox
 * registers into the SRB, and completes it with the masked mb[0] status.
 */
static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_SCSI_CMD ||
	    sp->type == SRB_NVME_CMD ||
	    sp->type == SRB_TM_CMD) {
		/* Wrong SRB type for this entry — firmware inconsistency. */
		ql_log(ql_log_warn, vha, 0x509d,
			"Inconsistent event entry type %d\n", sp->type);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	si = &sp->u.iocb_cmd;
	/* Copy no more registers than either side can hold. */
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = pkt->mb[i];

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

/*
 * Complete a notify-acknowledge (NACK) IOCB.
 *
 * Looks up the owning SRB and finishes it: zero on NOTIFY_ACK_SUCCESS,
 * QLA_FUNCTION_FAILED for any other firmware status.
 */
static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);

	if (!sp)
		return;

	/* Any status other than NOTIFY_ACK_SUCCESS is a failure. */
	sp->done(sp, pkt->u.isp2x.status == cpu_to_le16(NOTIFY_ACK_SUCCESS) ?
	    0 : QLA_FUNCTION_FAILED);
}

2047 2048 2049 2050 2051 2052 2053
/*
 * Process a CT (Common Transport) IOCB completion for 2xxx parts.
 *
 * For bsg-driven CT pass-through (SRB_CT_CMD) the entry is translated
 * into an fc_bsg_reply; for driver-internal CT (SRB_CT_PTHRU_CMD) the
 * MS status check decodes the response.  Completes the SRB in all cases.
 */
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
	    bsg_job = sp->u.bsg_job;
	    bsg_reply = bsg_job->reply;

	    type = "ct pass-through";

	    comp_status = le16_to_cpu(pkt->comp_status);

	    /*
	     * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	     * fc payload  to the caller
	     */
	    bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	    bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	    if (comp_status != CS_COMPLETE) {
		    if (comp_status == CS_DATA_UNDERRUN) {
			    /* Underrun still delivers partial data (DID_OK). */
			    res = DID_OK << 16;
			    bsg_reply->reply_payload_rcv_len =
				le16_to_cpu(pkt->rsp_info_len);

			    ql_log(ql_log_warn, vha, 0x5048,
				"CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				type, comp_status,
				bsg_reply->reply_payload_rcv_len);
		    } else {
			    ql_log(ql_log_warn, vha, 0x5049,
				"CT pass-through-%s error comp_status=0x%x.\n",
				type, comp_status);
			    res = DID_ERROR << 16;
			    bsg_reply->reply_payload_rcv_len = 0;
		    }
		    ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			pkt, sizeof(*pkt));
	    } else {
		    res = DID_OK << 16;
		    bsg_reply->reply_payload_rcv_len =
			bsg_job->reply_payload.payload_len;
		    bsg_job->reply_len = 0;
	    }
	    break;
	case SRB_CT_PTHRU_CMD:
	    /*
	     * borrowing sts_entry_24xx.comp_status.
	     * same location as ct_entry_24xx.comp_status
	     */
	     res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		 sp->name);
	     break;
	}

	sp->done(sp, res);
}

2119
/*
 * Process an ELS/CT IOCB completion on 24xx-and-later parts.
 *
 * Dispatches on the SRB type: bsg ELS/CT pass-through, driver-issued
 * ELS (PLOGI/logo), auth-ELS with remapped response buffers, and
 * driver-internal CT.  ELS-type IOCBs record the three firmware status
 * words in the SRB before completion; bsg jobs additionally get those
 * words appended after the fc_bsg_reply.  sp->done() is called exactly
 * once on every path except the unrecognized-SRB default, which drops
 * the entry after logging.
 */
static void
qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res, logit = 1;
	struct srb_iocb *els;
	uint n;
	scsi_qla_host_t *vha;
	struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;
	vha = sp->vha;

	type = NULL;

	/* fw_status[0..2]: completion status + two firmware subcodes. */
	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "rpt hst";
		break;
	case SRB_ELS_CMD_HST_NOLOGIN:
		type = "els";
		{
			struct els_entry_24xx *els = (void *)pkt;
			struct qla_bsg_auth_els_request *p =
				(struct qla_bsg_auth_els_request *)bsg_job->request;

			ql_dbg(ql_dbg_user, vha, 0x700f,
			     "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
			     __func__, sc_to_str(p->e.sub_cmd),
			     e->d_id[2], e->d_id[1], e->d_id[0],
			     comp_status, p->e.extra_rx_xchg_address, bsg_job);

			/* Response was bounced through a driver buffer;
			 * copy it back into the bsg scatterlist. */
			if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
				if (sp->remap.remapped) {
					n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
						bsg_job->reply_payload.sg_cnt,
						sp->remap.rsp.buf,
						sp->remap.rsp.len);
					ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
					   "%s: SG copied %x of %x\n",
					   __func__, n, sp->remap.rsp.len);
				} else {
					ql_dbg(ql_dbg_user, vha, 0x700f,
					   "%s: NOT REMAPPED (error)...!!!\n",
					   __func__);
				}
			}
		}
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/* borrowing sts_entry_24xx.comp_status.
		   same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
			(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
			sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	if (iocb_type == ELS_IOCB_TYPE) {
		/* Driver-issued ELS: record fw status in the SRB itself. */
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
		els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
		els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
		els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
		if (comp_status == CS_COMPLETE) {
			res =  DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res =  DID_OK << 16;
				els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
					ese->total_byte_count));

				/* LS_ACC with underrun is a normal case;
				 * suppress the noisy log below. */
				if (sp->remap.remapped &&
				    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
					    __func__, e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
					logit = 0;
				}

			} else if (comp_status == CS_PORT_LOGGED_OUT) {
				els->u.els_plogi.len = 0;
				res = DID_IMM_RETRY << 16;
				qlt_schedule_sess_for_deletion(sp->fcport);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}

			if (logit) {
				if (sp->remap.remapped &&
				    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
					    type, sp->handle, comp_status);

					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
					    fw_status[1], fw_status[2],
					    le32_to_cpu(((struct els_sts_entry_24xx *)
						pkt)->total_byte_count),
					    e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
				} else {
					ql_log(ql_log_info, vha, 0x503f,
					    "%s IOCB Done hdl=%x comp_status=0x%x\n",
					    type, sp->handle, comp_status);
					ql_log(ql_log_info, vha, 0x503f,
					    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
					    fw_status[1], fw_status[2],
					    le32_to_cpu(((struct els_sts_entry_24xx *)
						pkt)->total_byte_count),
					    e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
				}
			}
		}
		goto els_ct_done;
	}

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
				le32_to_cpu(ese->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le32_to_cpu(ese->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le32_to_cpu(ese->error_subcode_1),
			    le32_to_cpu(ese->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		/* Append raw fw status words after the standard reply. */
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		       fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	}
	else {
		res =  DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}

2319 2320 2321 2322 2323 2324 2325 2326
/*
 * Process a 24xx login/logout IOCB completion.
 *
 * Translates the logio entry into the SRB's logio data words
 * (data[0] = MBS_* result, data[1] = retry flag / loop info), updates
 * the fcport's role/class/feature flags on a successful PLOGI, and
 * tracks firmware exchange starvation.  sp->done() is always called.
 */
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];
	int logit = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Pessimistic default; overwritten on success below. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, sp->vha, 0x5036,
		    "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le32_to_cpu(logio->io_parameter[0]));

		/* A successful exchange clears the starvation counter. */
		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;

		if (sp->type == SRB_PRLI_CMD) {
			lio->u.logio.iop[0] =
			    le32_to_cpu(logio->io_parameter[0]);
			lio->u.logio.iop[1] =
			    le32_to_cpu(logio->io_parameter[1]);
			goto logio_done;
		}

		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		/* PLOGI: io_parameter[5] carries common features (FC-SP). */
		lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
		if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
			fcport->flags |= FCF_FCSP_DEVICE;

		/* io_parameter[0] bits describe the remote port's role. */
		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		logit = 0;
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		logit = 0;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		/* Firmware out of exchanges; reset RISC after 5 in a row. */
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		fallthrough;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	if (logit)
		ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
		       "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		       type, sp->handle, fcport->d_id.b24, fcport->port_name,
		       le16_to_cpu(logio->comp_status),
		       le32_to_cpu(logio->io_parameter[0]),
		       le32_to_cpu(logio->io_parameter[1]));
	else
		ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
		       "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		       type, sp->handle, fcport->d_id.b24, fcport->port_name,
		       le16_to_cpu(logio->comp_status),
		       le32_to_cpu(logio->io_parameter[0]),
		       le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

2464
/*
 * Process a task-management (TMF) IOCB completion.
 *
 * Records success/failure in iocb->u.tmf.data, schedules session
 * deletion when the completion status indicates the port went away,
 * dumps the raw status entry on failure, and completes the SRB.
 */
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	u16 comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	comp_status = le16_to_cpu(sts->comp_status);
	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	/* Optimistic default; downgraded by the checks below. */
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		/* FCP response info present: data[3] is the rsp code. */
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	/* Port-loss class of completion statuses: tear down the session. */
	switch (comp_status) {
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			       "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
			       fcport->d_id.b.domain, fcport->d_id.b.area,
			       fcport->d_id.b.al_pa,
			       port_state_str[FCS_ONLINE],
			       comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		break;

	default:
		break;
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
		    sts, sizeof(*sts));

	sp->done(sp, 0);
}

2540 2541
/*
 * Process an FC-NVMe command IOCB completion.
 *
 * Fills in the nvmefc_fcp_req transfer length and (depending on the
 * state flags) the NVMe response payload, validates the target's
 * reported transfer length against ours, maps the firmware completion
 * status to a QLA_* result, and completes the SRB.
 */
static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t        state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t        ret = QLA_SUCCESS;
	__le16		comp_status = sts->comp_status;
	int		logit = 0;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags  = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
	else
		sp->qpair->cmd_completion_cnt++;

	if (unlikely(comp_status != CS_COMPLETE))
		logit = 1;

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * both cases resp was dma'd to host buffer
	 * if both are 0, that is good path case.
	 * if six is set and 0 is clear, we need to
	 * copy resp data from status iocb to resp buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
			(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
		/* Response already DMA'd to fd->rspaddr. */
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		/*
		 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
		 * as an error.
		 */
		iocb->u.nvme.rsp_pyld_len = 0;
		fd->transferred_length = 0;
		ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
			"Unexpected values in NVMe_RSP IU.\n");
		logit = 1;
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		/* ERSP lives inside the status IOCB; byte-swap it out. */
		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
		if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
		    sizeof(struct nvme_fc_ersp_iu))) {
			/* Clamp a bogus firmware length to the IU size. */
			if (ql_mask_match(ql_dbg_io)) {
				WARN_ONCE(1, "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
				ql_log(ql_log_warn, fcport->vha, 0x5100,
				    "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
			}
			iocb->u.nvme.rsp_pyld_len =
				cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
		}
		iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	}

	if (state_flags & SF_NVME_ERSP) {
		struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
		u32 tgt_xfer_len;

		/* Cross-check target-reported length for dropped frames. */
		tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
		if (fd->transferred_length != tgt_xfer_len) {
			ql_log(ql_log_warn, fcport->vha, 0x3079,
			       "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
			       tgt_xfer_len, fd->transferred_length);
			logit = 1;
		} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
			/*
			 * Do not log if this is just an underflow and there
			 * is no data loss.
			 */
			logit = 0;
		}
	}

	if (unlikely(logit))
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		   "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
		   sp->name, sp->handle, comp_status,
		   fd->transferred_length, le32_to_cpu(sts->residual_len),
		   sts->ox_id);

	/*
	 * If transport error then Failure (HBA rejects request)
	 * otherwise transport will handle.
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
		break;

	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			       "Port to be marked lost on fcport=%06x, current "
			       "port state= %s comp_status %x.\n",
			       fcport->d_id.b24, port_state_str[FCS_ONLINE],
			       comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		fallthrough;
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}

2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710
/*
 * Complete a VP (virtual port) control IOCB.
 *
 * Validates the entry and completion status, logs the outcome, then
 * stores the result in sp->rc and completes the SRB with it.
 */
static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	int rval;
	srb_t *sp = qla2x00_get_sp_from_handle(vha, func, req, vce);

	if (!sp)
		return;

	if (vce->entry_status != 0) {
		/* Malformed entry reported by firmware. */
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		/* Entry OK but the operation itself failed. */
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);
		rval = QLA_SUCCESS;
	}

	sp->rc = rval;
	sp->done(sp, rval);
}

2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756
/* Process a single response queue entry. */
static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
					   struct rsp_que *rsp,
					   sts_entry_t *pkt)
{
	struct req_que *req = rsp->req;
	uint16_t i, n;

	switch (pkt->entry_type) {
	case STATUS_TYPE:
		qla2x00_status_entry(vha, rsp, pkt);
		break;
	case STATUS_TYPE_21: {
		/* Type-21 entries batch multiple completion handles. */
		sts21_entry_t *e21 = (sts21_entry_t *)pkt;

		n = e21->handle_count;
		for (i = 0; i < n; i++)
			qla2x00_process_completed_request(vha, req,
							  e21->handle[i]);
		break;
	}
	case STATUS_TYPE_22: {
		/* Type-22 entries likewise carry a handle array. */
		sts22_entry_t *e22 = (sts22_entry_t *)pkt;

		n = e22->handle_count;
		for (i = 0; i < n; i++)
			qla2x00_process_completed_request(vha, req,
							  e22->handle[i]);
		break;
	}
	case STATUS_CONT_TYPE:
		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
		break;
	case MBX_IOCB_TYPE:
		qla2x00_mbx_iocb_entry(vha, req, (struct mbx_entry *)pkt);
		break;
	case CT_IOCB_TYPE:
		qla2x00_ct_entry(vha, req, pkt, CT_IOCB_TYPE);
		break;
	default:
		/* Type Not Supported. */
		ql_log(ql_log_warn, vha, 0x504a,
		       "Received unknown response pkt type %x entry status=%x.\n",
		       pkt->entry_type, pkt->entry_status);
		break;
	}
}

L
Linus Torvalds 已提交
2757 2758
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 *
 * Walks the response ring until an unprocessed-signature entry is seen,
 * dispatching each entry (error entries to qla2x00_error_entry(), the
 * rest to qla2x00_process_response_entry()), marking each consumed
 * entry RESPONSE_PROCESSED, and finally publishing the new ring index
 * to the ISP's response-queue-out register.
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance (and wrap) the ring before handling the entry. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			/* wmb() orders the signature store before reuse. */
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

2801
/*
 * Copy sense data from a status IOCB into the SCSI command's sense
 * buffer.
 *
 * @par_sense_len: number of sense bytes available in this IOCB chunk.
 * @sense_len:     total sense length reported by firmware (clamped to
 *                 SCSI_SENSE_BUFFERSIZE).
 *
 * If more sense data remains than this entry carried, the SRB is parked
 * in rsp->status_srb so subsequent status-continuation entries can
 * append the rest (tracked via SET_CMD_SENSE_PTR/LEN).
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	/* This entry may carry only part of the total sense data. */
	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		/* More sense expected in continuation entries. */
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

2840 2841
/* T10-PI (DIF) protection-information tuple as carried on the wire. */
struct scsi_dif_tuple {
	__be16 guard;       /* Checksum */
	__be16 app_tag;         /* APPL identifier */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
2852
static inline int
2853 2854
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
2855
	struct scsi_qla_host *vha = sp->vha;
2856
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2857 2858
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
2859 2860 2861 2862
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

2863 2864 2865 2866
	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
2867 2868 2869 2870 2871 2872
	a_guard   = get_unaligned_le16(ap + 2);
	a_app_tag = get_unaligned_le16(ap + 0);
	a_ref_tag = get_unaligned_le32(ap + 4);
	e_guard   = get_unaligned_le16(ep + 2);
	e_app_tag = get_unaligned_le16(ep + 0);
	e_ref_tag = get_unaligned_le32(ep + 4);
2873

2874 2875
	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);
2876

2877 2878
	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2879
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2880
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2881
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2882
	    a_app_tag, e_app_tag, a_guard, e_guard);
2883

2884 2885 2886 2887 2888
	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
2889 2890 2891
	if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
	    (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
	     a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
2908
			struct t10_pi_tuple *spt;
2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
2924
				ql_log(ql_log_warn, vha, 0x302f,
2925 2926
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
2927 2928 2929 2930 2931 2932
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

2933
			spt->app_tag = T10_PI_APP_ESCAPE;
2934
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2935
				spt->ref_tag = T10_PI_REF_ESCAPE;
2936 2937 2938 2939 2940
		}

		return 0;
	}

2941 2942
	/* check guard */
	if (e_guard != a_guard) {
2943
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2944
		set_host_byte(cmd, DID_ABORT);
2945
		return 1;
2946 2947
	}

2948 2949
	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
2950
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2951
		set_host_byte(cmd, DID_ABORT);
2952
		return 1;
2953 2954
	}

2955 2956
	/* check appl tag */
	if (e_app_tag != a_app_tag) {
2957
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2958
		set_host_byte(cmd, DID_ABORT);
2959
		return 1;
2960
	}
2961

2962
	return 1;
2963 2964
}

2965 2966 2967 2968 2969 2970 2971 2972 2973 2974
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
				  struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
2975
	struct bsg_job *bsg_job = NULL;
2976 2977
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
2978 2979
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;
2980 2981

	/* Validate handle. */
2982
	if (index >= req->num_outstanding_cmds) {
2983 2984 2985 2986 2987 2988 2989
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
2990
	if (!sp) {
2991 2992 2993 2994 2995 2996 2997 2998
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

2999 3000 3001 3002 3003 3004
	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

3005 3006 3007 3008 3009 3010 3011 3012
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

3013
	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3014 3015 3016
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
3017
			bsg_reply->reply_payload_rcv_len =
3018
					bsg_job->reply_payload.payload_len;
3019
			vha->qla_stats.input_bytes +=
3020
				bsg_reply->reply_payload_rcv_len;
3021
			vha->qla_stats.input_requests++;
3022 3023 3024 3025 3026 3027
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
3028
		    "Command completed with data overrun thread_id=%d\n",
3029 3030 3031 3032 3033 3034
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
3035
		    "Command completed with data underrun thread_id=%d\n",
3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
3062
		    "Command completed with read data underrun "
3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
3101
	bsg_reply->reply_payload_rcv_len = 0;
3102 3103 3104

done:
	/* Return the vendor specific reply to API */
3105
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3106 3107 3108
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
3109
	sp->done(sp, DID_OK << 16);
3110 3111 3112

}

L
Linus Torvalds 已提交
3113 3114
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
3115 3116
 * @vha: SCSI driver HA context
 * @rsp: response queue
L
Linus Torvalds 已提交
3117 3118 3119
 * @pkt: Entry pointer
 */
static void
3120
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
L
Linus Torvalds 已提交
3121 3122 3123 3124
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
3125 3126
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;
L
Linus Torvalds 已提交
3127 3128
	uint16_t	comp_status;
	uint16_t	scsi_status;
3129
	uint16_t	ox_id;
L
Linus Torvalds 已提交
3130 3131
	uint8_t		lscsi_status;
	int32_t		resid;
3132 3133
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
3134
	uint8_t		*rsp_info, *sense_data;
3135
	struct qla_hw_data *ha = vha->hw;
3136 3137 3138
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
3139
	int logit = 1;
3140
	int res = 0;
3141
	uint16_t state_flags = 0;
3142
	uint16_t sts_qual = 0;
3143

3144
	if (IS_FWI2_CAPABLE(ha)) {
3145 3146
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3147
		state_flags = le16_to_cpu(sts24->state_flags);
3148 3149 3150 3151
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
3152 3153 3154
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];
3155

3156 3157 3158 3159 3160 3161 3162 3163 3164
	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

L
Linus Torvalds 已提交
3165
	/* Validate handle. */
3166
	if (handle < req->num_outstanding_cmds) {
3167
		sp = req->outstanding_cmds[handle];
3168 3169 3170 3171 3172 3173 3174
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
3175
		ql_dbg(ql_dbg_io, vha, 0x3017,
3176 3177
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);
L
Linus Torvalds 已提交
3178

3179 3180 3181 3182 3183 3184 3185
		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
L
Linus Torvalds 已提交
3186 3187
		return;
	}
3188
	qla_put_iocbs(sp->qpair, &sp->iores);
3189

3190 3191 3192 3193 3194 3195 3196 3197
	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

3198 3199
	/* NVME completion. */
	if (sp->type == SRB_NVME_CMD) {
3200 3201
		req->outstanding_cmds[handle] = NULL;
		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3202 3203 3204
		return;
	}

3205 3206 3207 3208 3209
	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

3210 3211 3212 3213 3214 3215
	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

3216
	/* Fast path completion. */
3217
	qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3218
	sp->qpair->cmd_completion_cnt++;
3219

3220 3221 3222 3223 3224 3225 3226
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
3227
	cp = GET_CMD_SP(sp);
L
Linus Torvalds 已提交
3228
	if (cp == NULL) {
3229
		ql_dbg(ql_dbg_io, vha, 0x3018,
3230 3231
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);
L
Linus Torvalds 已提交
3232 3233 3234 3235

		return;
	}

3236
	lscsi_status = scsi_status & STATUS_MASK;
L
Linus Torvalds 已提交
3237

3238
	fcport = sp->fcport;
L
Linus Torvalds 已提交
3239

3240
	ox_id = 0;
3241 3242
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
3243
	if (IS_FWI2_CAPABLE(ha)) {
3244 3245 3246 3247 3248 3249 3250 3251
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
3252 3253 3254
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3255
		ox_id = le16_to_cpu(sts24->ox_id);
3256
		par_sense_len = sizeof(sts24->data);
3257
		sts_qual = le16_to_cpu(sts24->status_qualifier);
3258
	} else {
3259 3260 3261 3262
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3263 3264 3265
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
3266
		par_sense_len = sizeof(sts->req_sense_data);
3267 3268
	}

L
Linus Torvalds 已提交
3269 3270
	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3271
		/* Sense data lies beyond any FCP RESPONSE data. */
3272
		if (IS_FWI2_CAPABLE(ha)) {
3273
			sense_data += rsp_info_len;
3274 3275
			par_sense_len -= rsp_info_len;
		}
3276
		if (rsp_info_len > 3 && rsp_info[3]) {
3277
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3278 3279
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);
L
Linus Torvalds 已提交
3280

3281
			res = DID_BUS_BUSY << 16;
3282
			goto out;
L
Linus Torvalds 已提交
3283 3284 3285
		}
	}

3286 3287 3288 3289 3290
	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

3291 3292 3293 3294
	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
3295 3296 3297
	if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
		     lscsi_status == SAM_STAT_BUSY))
		qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3298

L
Linus Torvalds 已提交
3299 3300 3301 3302 3303
	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
3304
	case CS_QUEUE_FULL:
L
Linus Torvalds 已提交
3305
		if (scsi_status == 0) {
3306
			res = DID_OK << 16;
L
Linus Torvalds 已提交
3307 3308 3309
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3310
			resid = resid_len;
3311
			scsi_set_resid(cp, resid);
3312 3313

			if (!lscsi_status &&
3314
			    ((unsigned)(scsi_bufflen(cp) - resid) <
3315
			     cp->underflow)) {
3316
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3317
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3318
				    resid, scsi_bufflen(cp));
3319

3320
				res = DID_ERROR << 16;
3321 3322
				break;
			}
L
Linus Torvalds 已提交
3323
		}
3324
		res = DID_OK << 16 | lscsi_status;
L
Linus Torvalds 已提交
3325

3326
		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3327
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3328
			    "QUEUE FULL detected.\n");
3329 3330
			break;
		}
3331
		logit = 0;
L
Linus Torvalds 已提交
3332 3333 3334
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

3335
		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
L
Linus Torvalds 已提交
3336 3337 3338
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

3339
		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3340
		    rsp, res);
L
Linus Torvalds 已提交
3341 3342 3343
		break;

	case CS_DATA_UNDERRUN:
3344
		/* Use F/W calculated residual length. */
3345 3346 3347 3348
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3349 3350 3351
				ql_log(ql_log_warn, fcport->vha, 0x301d,
				       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				       resid, scsi_bufflen(cp));
3352

3353 3354
				vha->interface_err_cnt++;

3355
				res = DID_ERROR << 16 | lscsi_status;
3356
				goto check_scsi_status;
3357
			}
3358

3359 3360 3361
			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
3362
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3363
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3364
				    resid, scsi_bufflen(cp));
3365

3366
				res = DID_ERROR << 16;
3367 3368
				break;
			}
3369 3370 3371 3372 3373 3374 3375
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

3376 3377 3378
			ql_log(ql_log_warn, fcport->vha, 0x301f,
			       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			       resid, scsi_bufflen(cp));
3379

3380 3381
			vha->interface_err_cnt++;

3382
			res = DID_ERROR << 16 | lscsi_status;
3383
			goto check_scsi_status;
3384 3385 3386 3387
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
L
Linus Torvalds 已提交
3388 3389
		}

3390
		res = DID_OK << 16 | lscsi_status;
3391
		logit = 0;
3392

3393
check_scsi_status:
L
Linus Torvalds 已提交
3394
		/*
3395
		 * Check to see if SCSI Status is non zero. If so report SCSI
L
Linus Torvalds 已提交
3396 3397 3398
		 * Status.
		 */
		if (lscsi_status != 0) {
3399
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3400
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3401
				    "QUEUE FULL detected.\n");
3402
				logit = 1;
3403 3404
				break;
			}
L
Linus Torvalds 已提交
3405 3406 3407
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

3408
			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
L
Linus Torvalds 已提交
3409 3410 3411
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

3412
			qla2x00_handle_sense(sp, sense_data, par_sense_len,
3413
			    sense_len, rsp, res);
L
Linus Torvalds 已提交
3414 3415 3416 3417 3418 3419 3420 3421
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
3422
	case CS_TIMEOUT:
3423 3424
	case CS_RESET:

3425 3426 3427 3428 3429
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
3430
		res = DID_TRANSPORT_DISRUPTED << 16;
3431 3432 3433 3434 3435 3436 3437 3438 3439

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

3440 3441 3442 3443 3444
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
				"Port to be marked lost on fcport=%02x%02x%02x, current "
				"port state= %s comp_status %x.\n", fcport->d_id.b.domain,
				fcport->d_id.b.area, fcport->d_id.b.al_pa,
3445
				port_state_str[FCS_ONLINE],
3446 3447
				comp_status);

3448
			qlt_schedule_sess_for_deletion(fcport);
3449 3450
		}

L
Linus Torvalds 已提交
3451 3452 3453
		break;

	case CS_ABORTED:
3454
		res = DID_RESET << 16;
L
Linus Torvalds 已提交
3455
		break;
3456 3457

	case CS_DIF_ERROR:
3458
		logit = qla2x00_handle_dif_error(sp, sts24);
3459
		res = cp->result;
3460
		break;
3461 3462 3463

	case CS_TRANSPORT:
		res = DID_ERROR << 16;
3464
		vha->hw_err_cnt++;
3465 3466 3467 3468 3469 3470 3471 3472 3473 3474

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

3475 3476 3477 3478 3479 3480 3481 3482 3483 3484
	case CS_DMA:
		ql_log(ql_log_info, fcport->vha, 0x3022,
		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b24,
		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);
		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
		    pkt, sizeof(*sts24));
		res = DID_ERROR << 16;
3485
		vha->hw_err_cnt++;
3486
		break;
L
Linus Torvalds 已提交
3487
	default:
3488
		res = DID_ERROR << 16;
L
Linus Torvalds 已提交
3489 3490 3491
		break;
	}

3492 3493
out:
	if (logit)
3494 3495 3496 3497 3498 3499 3500
		ql_log(ql_log_warn, fcport->vha, 0x3022,
		       "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		       comp_status, scsi_status, res, vha->host_no,
		       cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		       fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		       cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		       resid_len, fw_resid_len, sp, cp);
3501

3502
	if (rsp->status_srb == NULL)
3503
		sp->done(sp, res);
L
Linus Torvalds 已提交
3504 3505 3506 3507
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3508
 * @rsp: response queue
L
Linus Torvalds 已提交
3509 3510 3511 3512 3513
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
3514
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
L
Linus Torvalds 已提交
3515
{
3516
	uint8_t	sense_sz = 0;
3517
	struct qla_hw_data *ha = rsp->hw;
3518
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3519
	srb_t *sp = rsp->status_srb;
L
Linus Torvalds 已提交
3520
	struct scsi_cmnd *cp;
3521 3522
	uint32_t sense_len;
	uint8_t *sense_ptr;
L
Linus Torvalds 已提交
3523

3524 3525
	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;
L
Linus Torvalds 已提交
3526

3527 3528
	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);
L
Linus Torvalds 已提交
3529

3530 3531 3532 3533
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
L
Linus Torvalds 已提交
3534

3535 3536
		rsp->status_srb = NULL;
		return;
L
Linus Torvalds 已提交
3537 3538
	}

3539 3540 3541 3542
	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;
3543

3544 3545 3546 3547 3548 3549
	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		sense_ptr, sense_sz);
3550

3551 3552
	sense_len -= sense_sz;
	sense_ptr += sense_sz;
3553

3554 3555 3556 3557 3558 3559
	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
3560
		sp->done(sp, cp->result);
3561 3562 3563
	}
}

L
Linus Torvalds 已提交
3564 3565
/**
 * qla2x00_error_entry() - Process an error entry.
3566 3567
 * @vha: SCSI driver HA context
 * @rsp: response queue
L
Linus Torvalds 已提交
3568
 * @pkt: Entry pointer
3569
 * return : 1=allow further error analysis. 0=no additional error analysis.
L
Linus Torvalds 已提交
3570
 */
3571
static int
3572
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
L
Linus Torvalds 已提交
3573 3574
{
	srb_t *sp;
3575
	struct qla_hw_data *ha = vha->hw;
3576
	const char func[] = "ERROR-IOCB";
3577
	uint16_t que = MSW(pkt->handle);
3578
	struct req_que *req = NULL;
3579
	int res = DID_ERROR << 16;
3580

3581
	ql_dbg(ql_dbg_async, vha, 0x502a,
3582 3583
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3584

3585 3586 3587 3588 3589
	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

3590 3591
	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;
L
Linus Torvalds 已提交
3592

3593 3594
	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;
3595

3596 3597 3598 3599 3600 3601 3602 3603 3604
	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
3605
	default:
3606 3607
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
3608
			qla_put_iocbs(sp->qpair, &sp->iores);
3609 3610 3611 3612 3613
			sp->done(sp, res);
			return 0;
		}
		break;

3614
	case SA_UPDATE_IOCB_TYPE:
3615 3616 3617 3618
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
		return 1;
L
Linus Torvalds 已提交
3619
	}
3620 3621
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
3622
	    "Error entry - invalid handle/queue (%04x).\n", que);
3623
	return 0;
L
Linus Torvalds 已提交
3624 3625
}

3626 3627
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
3628
 * @vha: SCSI driver HA context
3629 3630 3631
 * @mb0: Mailbox0 register
 */
static void
3632
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3633 3634
{
	uint16_t	cnt;
3635
	uint32_t	mboxes;
3636
	__le16 __iomem *wptr;
3637
	struct qla_hw_data *ha = vha->hw;
3638 3639
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

3640
	/* Read all mbox registers? */
3641 3642
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
3643
	if (!ha->mcp)
3644
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3645 3646 3647
	else
		mboxes = ha->mcp->in_mb;

3648 3649 3650
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
3651
	mboxes >>= 1;
3652
	wptr = &reg->mailbox1;
3653 3654

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3655
		if (mboxes & BIT_0)
3656
			ha->mailbox_out[cnt] = rd_reg_word(wptr);
3657 3658

		mboxes >>= 1;
3659 3660 3661 3662
		wptr++;
	}
}

3663 3664 3665 3666 3667 3668
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
3669
	srb_t *orig_sp = NULL;
3670 3671 3672 3673 3674 3675 3676
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
3677
	abt->u.abt.comp_status = pkt->comp_status;
3678 3679 3680 3681 3682
	orig_sp = sp->cmd_sp;
	/* Need to pass original sp */
	if (orig_sp)
		qla_nvme_abort_process_comp_status(pkt, orig_sp);

3683
	sp->done(sp, 0);
3684 3685
}

3686 3687
void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
    struct pt_ls4_request *pkt, struct req_que *req)
3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757
/**
 * qla_chk_cont_iocb_avail - check for all continuation iocbs are available
 *   before iocb processing can start.
 * @vha: host adapter pointer
 * @rsp: respond queue
 * @pkt: head iocb describing how many continuation iocb
 * Return: 0 all iocbs has arrived, xx- all iocbs have not arrived.
 */
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
	response_t *end_pkt;
	int rc = 0;
	u32 rsp_q_in;

	if (pkt->entry_count == 1)
		return rc;

	/* ring_index was pre-increment. set it back to current pkt */
	if (rsp->ring_index == 0)
		start_pkt_ring_index = rsp->length - 1;
	else
		start_pkt_ring_index = rsp->ring_index - 1;

	if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
		end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
			rsp->length - 1;
	else
		end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;

	end_pkt = rsp->ring + end_pkt_ring_index;

	/*  next pkt = end_pkt + 1 */
	n_ring_index = end_pkt_ring_index + 1;
	if (n_ring_index >= rsp->length)
		n_ring_index = 0;

	rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
		rd_reg_dword(rsp->rsp_q_in);

	/* rsp_q_in is either wrapped or pointing beyond endpkt */
	if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
			rsp_q_in >= n_ring_index)
		/* all IOCBs arrived. */
		rc = 0;
	else
		rc = -EIO;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
	    "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
	    __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
	    rsp_q_in, rc);

	return rc;
}

3758 3759
/**
 * qla24xx_process_response_queue() - Process response queue entries.
3760 3761
 * @vha: SCSI driver HA context
 * @rsp: response queue
3762
 */
3763 3764
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
3765 3766
{
	struct sts_entry_24xx *pkt;
3767
	struct qla_hw_data *ha = vha->hw;
3768
	struct purex_entry_24xx *purex_entry;
3769
	struct purex_item *pure_item;
3770

3771
	if (!ha->flags.fw_started)
3772 3773
		return;

3774 3775
	if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
		rsp->qpair->rcv_intr = 1;
3776
		qla_cpu_update(rsp->qpair, smp_processor_id());
3777
	}
3778

3779 3780
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3781

3782 3783 3784 3785
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
3786
		} else {
3787
			rsp->ring_ptr++;
3788 3789 3790
		}

		if (pkt->entry_status != 0) {
3791
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3792
				goto process_err;
3793

3794 3795 3796 3797
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
3798
process_err:
3799 3800 3801

		switch (pkt->entry_type) {
		case STATUS_TYPE:
3802
			qla2x00_status_entry(vha, rsp, pkt);
3803 3804
			break;
		case STATUS_CONT_TYPE:
3805
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3806
			break;
3807
		case VP_RPT_ID_IOCB_TYPE:
3808
			qla24xx_report_id_acquisition(vha,
3809 3810
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
3811 3812 3813 3814
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
3815
		case CT_IOCB_TYPE:
3816 3817
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
3818
		case ELS_IOCB_TYPE:
3819 3820
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
3821
		case ABTS_RECV_24XX:
3822
			if (qla_ini_mode_enabled(vha)) {
3823 3824 3825 3826 3827
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
							 qla24xx_process_abts);
3828 3829
				break;
			}
3830 3831
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
3832
				/* ensure that the ATIO queue is empty */
3833 3834
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
3835 3836 3837 3838
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
3839
			fallthrough;
3840 3841
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
3842
		case CTIO_CRC2:
3843
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3844
			break;
3845 3846 3847 3848
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
3849 3850
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3851 3852
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
3853 3854 3855 3856
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
					(struct nack_to_isp *)pkt);
			break;
3857 3858 3859 3860 3861
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
3862 3863 3864 3865
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
3866
		case MBX_IOCB_TYPE:
3867 3868
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
3869
			break;
3870 3871 3872 3873
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
3874
		case PUREX_IOCB_TYPE:
3875 3876 3877 3878 3879 3880 3881 3882
			purex_entry = (void *)pkt;
			switch (purex_entry->els_frame_payload[3]) {
			case ELS_RDP:
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
						 qla24xx_process_purex_rdp);
3883
				break;
3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895
			case ELS_FPIN:
				if (!vha->hw->flags.scm_enabled) {
					ql_log(ql_log_warn, vha, 0x5094,
					       "SCM not active for this port\n");
					break;
				}
				pure_item = qla27xx_copy_fpin_pkt(vha,
							  (void **)&pkt, &rsp);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
						 qla27xx_process_purex_fpin);
3896 3897
				break;

3898 3899 3900 3901 3902 3903 3904 3905 3906
			case ELS_AUTH_ELS:
				if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
					ql_dbg(ql_dbg_init, vha, 0x5091,
					    "Defer processing ELS opcode %#x...\n",
					    purex_entry->els_frame_payload[3]);
					return;
				}
				qla24xx_auth_els(vha, (void **)&pkt, &rsp);
				break;
3907 3908 3909 3910 3911
			default:
				ql_log(ql_log_warn, vha, 0x509c,
				       "Discarding ELS Request opcode 0x%x\n",
				       purex_entry->els_frame_payload[3]);
			}
3912
			break;
3913 3914 3915 3916 3917
		case SA_UPDATE_IOCB_TYPE:
			qla28xx_sa_update_iocb_entry(vha, rsp->req,
				(struct sa_update_28xx *)pkt);
			break;

3918 3919
		default:
			/* Type Not Supported. */
3920
			ql_dbg(ql_dbg_async, vha, 0x5042,
3921 3922
			       "Received unknown response pkt type 0x%x entry status=%x.\n",
			       pkt->entry_type, pkt->entry_status);
3923 3924 3925 3926 3927 3928 3929
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
3930
	if (IS_P3P_TYPE(ha)) {
3931
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3932

3933
		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3934
	} else {
3935
		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3936
	}
3937 3938
}

3939
static void
3940
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3941 3942 3943
{
	int rval;
	uint32_t cnt;
3944
	struct qla_hw_data *ha = vha->hw;
3945 3946
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

3947
	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3948
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3949 3950 3951
		return;

	rval = QLA_SUCCESS;
3952 3953 3954 3955
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3956 3957
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
3958
			wrt_reg_dword(&reg->iobase_window, 0x0001);
3959 3960 3961 3962 3963 3964 3965
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

3966
	rval = QLA_SUCCESS;
3967 3968
	wrt_reg_dword(&reg->iobase_window, 0x0003);
	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3969 3970
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
3971
			wrt_reg_dword(&reg->iobase_window, 0x0003);
3972 3973 3974 3975 3976 3977 3978 3979
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
3980
	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3981 3982
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");
3983 3984

done:
3985 3986
	wrt_reg_dword(&reg->iobase_window, 0x0000);
	rd_reg_dword(&reg->iobase_window);
3987 3988
}

3989
/**
3990
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3991
 * @irq: interrupt number
3992 3993 3994 3995 3996 3997 3998
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
3999
qla24xx_intr_handler(int irq, void *dev_id)
4000
{
4001 4002
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
4003 4004 4005 4006 4007
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
4008
	uint16_t	mb[8];
4009
	struct rsp_que *rsp;
4010
	unsigned long	flags;
4011
	bool process_atio = false;
4012

4013 4014
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
4015 4016
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
4017 4018 4019
		return IRQ_NONE;
	}

4020
	ha = rsp->hw;
4021 4022 4023
	reg = &ha->iobase->isp24;
	status = 0;

4024 4025 4026
	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

4027
	spin_lock_irqsave(&ha->hardware_lock, flags);
4028
	vha = pci_get_drvdata(ha->pdev);
4029
	for (iter = 50; iter--; ) {
4030
		stat = rd_reg_dword(&reg->host_status);
4031
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
4032
			break;
4033
		if (stat & HSRX_RISC_PAUSED) {
4034
			if (unlikely(pci_channel_offline(ha->pdev)))
4035 4036
				break;

4037
			hccr = rd_reg_dword(&reg->hccr);
4038

4039 4040 4041
			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);
4042

4043
			qla2xxx_check_risc_status(vha);
4044

4045
			ha->isp_ops->fw_dump(vha);
4046
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4047 4048 4049 4050 4051
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
4052 4053 4054 4055
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
4056
			qla24xx_mbx_completion(vha, MSW(stat));
4057 4058 4059
			status |= MBX_INTERRUPT;

			break;
4060
		case INTR_ASYNC_EVENT:
4061
			mb[0] = MSW(stat);
4062 4063 4064
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
4065
			qla2x00_async_event(vha, rsp, mb);
4066
			break;
4067 4068
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
4069
			qla24xx_process_response_queue(vha, rsp);
4070
			break;
4071
		case INTR_ATIO_QUE_UPDATE_27XX:
4072 4073
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
4074
			break;
4075 4076
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
4077 4078
			qla24xx_process_response_queue(vha, rsp);
			break;
4079
		default:
4080 4081
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
4082 4083
			break;
		}
4084 4085
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
4086 4087
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
4088
	}
4089
	qla2x00_handle_mbx_completion(ha, status);
4090
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4091

4092 4093 4094 4095 4096 4097
	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

4098 4099 4100
	return IRQ_HANDLED;
}

4101 4102 4103
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
4104 4105
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
4106
	struct device_reg_24xx __iomem *reg;
4107
	struct scsi_qla_host *vha;
4108
	unsigned long flags;
4109

4110 4111
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
4112 4113
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
4114 4115 4116
		return IRQ_NONE;
	}
	ha = rsp->hw;
4117 4118
	reg = &ha->iobase->isp24;

4119
	spin_lock_irqsave(&ha->hardware_lock, flags);
4120

4121
	vha = pci_get_drvdata(ha->pdev);
4122
	qla24xx_process_response_queue(vha, rsp);
4123
	if (!ha->flags.disable_msix_handshake) {
4124 4125
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
4126
	}
4127
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4128 4129 4130 4131 4132 4133 4134

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
4135 4136 4137
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
4138 4139 4140 4141
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
4142
	uint16_t	mb[8];
4143
	unsigned long flags;
4144
	bool process_atio = false;
4145

4146 4147
	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
4148 4149
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
4150 4151 4152
		return IRQ_NONE;
	}
	ha = rsp->hw;
4153 4154 4155
	reg = &ha->iobase->isp24;
	status = 0;

4156
	spin_lock_irqsave(&ha->hardware_lock, flags);
4157
	vha = pci_get_drvdata(ha->pdev);
4158
	do {
4159
		stat = rd_reg_dword(&reg->host_status);
4160
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
4161
			break;
4162
		if (stat & HSRX_RISC_PAUSED) {
4163
			if (unlikely(pci_channel_offline(ha->pdev)))
4164 4165
				break;

4166
			hccr = rd_reg_dword(&reg->hccr);
4167

4168 4169 4170
			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);
4171

4172
			qla2xxx_check_risc_status(vha);
4173
			vha->hw_err_cnt++;
4174

4175
			ha->isp_ops->fw_dump(vha);
4176
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4177 4178 4179 4180 4181
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
4182 4183 4184 4185
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
4186
			qla24xx_mbx_completion(vha, MSW(stat));
4187 4188 4189
			status |= MBX_INTERRUPT;

			break;
4190
		case INTR_ASYNC_EVENT:
4191
			mb[0] = MSW(stat);
4192 4193 4194
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
4195
			qla2x00_async_event(vha, rsp, mb);
4196
			break;
4197 4198
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
4199
			qla24xx_process_response_queue(vha, rsp);
4200
			break;
4201
		case INTR_ATIO_QUE_UPDATE_27XX:
4202 4203
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
4204
			break;
4205 4206
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
4207 4208
			qla24xx_process_response_queue(vha, rsp);
			break;
4209
		default:
4210 4211
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
4212 4213
			break;
		}
4214
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4215
	} while (0);
4216
	qla2x00_handle_mbx_completion(ha, status);
4217
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4218

4219 4220 4221 4222 4223 4224
	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

4225 4226 4227
	return IRQ_HANDLED;
}

4228 4229
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

4242
	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4243 4244 4245 4246 4247 4248

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

4263 4264
	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
4265
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4266
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4267

4268
	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4269 4270 4271 4272

	return IRQ_HANDLED;
}

4273 4274 4275 4276
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
4277
	irq_handler_t handler;
4278 4279
};

4280
static const struct qla_init_msix_entry msix_entries[] = {
4281 4282 4283 4284
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
4285
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
4286 4287
};

4288
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
4289 4290 4291 4292
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

4293
static int
4294
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4295 4296 4297
{
	int i, ret;
	struct qla_msix_entry *qentry;
4298
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4299
	int min_vecs = QLA_BASE_VECTORS;
4300 4301 4302 4303
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

4304 4305
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
4306
		desc.pre_vectors++;
4307 4308
		min_vecs++;
	}
4309

4310
	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4311 4312
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4313
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4314
		    PCI_IRQ_MSIX);
4315 4316
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4317
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4318
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4319
		    &desc);
4320

4321 4322 4323 4324 4325 4326 4327
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving   up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
4328 4329
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
4330
		ha->msix_count = ret;
4331
		/* Recalculate queue values */
4332
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
4345
	}
4346
	vha->irq_offset = desc.pre_vectors;
4347 4348 4349
	ha->msix_entries = kcalloc(ha->msix_count,
				   sizeof(struct qla_msix_entry),
				   GFP_KERNEL);
4350
	if (!ha->msix_entries) {
4351 4352
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
4353
		ret = -ENOMEM;
4354
		goto free_irqs;
4355 4356 4357
	}
	ha->flags.msix_enabled = 1;

4358 4359
	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
4360 4361
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
4362
		qentry->have_irq = 0;
4363
		qentry->in_use = 0;
4364
		qentry->handle = NULL;
4365 4366
	}

4367
	/* Enable MSI-X vectors for the base queue */
4368
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
4369
		qentry = &ha->msix_entries[i];
4370
		qentry->handle = rsp;
4371
		rsp->msix = qentry;
4372
		scnprintf(qentry->name, sizeof(qentry->name),
4373
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4374
		if (IS_P3P_TYPE(ha))
4375 4376 4377
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
4378
		else
4379 4380
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
4381
				0, qentry->name, rsp);
4382 4383 4384
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
4385
		qentry->in_use = 1;
4386 4387 4388 4389 4390 4391
	}

	/*
	 * If target mode is enable, also request the vector for the ATIO
	 * queue.
	 */
4392 4393
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
4394
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4395
		rsp->msix = qentry;
4396 4397
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
4398 4399
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
4400
		qentry->in_use = 1;
4401
		ret = request_irq(qentry->vector,
4402
			msix_entries[QLA_ATIO_VECTOR].handler,
4403
			0, qentry->name, rsp);
4404
		qentry->have_irq = 1;
4405 4406
	}

4407 4408 4409 4410 4411
msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
4412
		qla2x00_free_irqs(vha);
4413 4414 4415 4416
		ha->mqenable = 0;
		goto msix_out;
	}

4417
	/* Enable MSI-X vector for response queue update for queue 0 */
4418
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4419
		if (ha->msixbase && ha->mqiobase &&
4420 4421
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
4422 4423
			ha->mqenable = 1;
	} else
4424 4425 4426
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
4427
			ha->mqenable = 1;
4428 4429 4430 4431 4432 4433
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4434

4435 4436
msix_out:
	return ret;
4437 4438 4439 4440

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
4441 4442 4443
}

int
4444
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4445
{
4446
	int ret = QLA_FUNCTION_FAILED;
4447
	device_reg_t *reg = ha->iobase;
4448
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4449 4450

	/* If possible, enable MSI-X. */
4451 4452
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4453
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4454 4455
		goto skip_msi;

4456 4457 4458
	if (ql2xenablemsix == 2)
		goto skip_msix;

4459 4460 4461 4462
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		(ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
4463 4464
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4465
			ha->pdev->subsystem_vendor,
4466
			ha->pdev->subsystem_device);
4467 4468
		goto skip_msi;
	}
4469

4470
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4471 4472
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4473
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4474 4475 4476
		goto skip_msix;
	}

4477
	ret = qla24xx_enable_msix(ha, rsp);
4478
	if (!ret) {
4479 4480 4481
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
4482
		goto clear_risc_ints;
4483
	}
4484

4485
skip_msix:
4486

4487
	ql_log(ql_log_info, vha, 0x0037,
4488
	    "Falling back-to MSI mode -- ret=%d.\n", ret);
4489

4490
	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4491
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4492
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4493 4494
		goto skip_msi;

4495
	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4496
	if (ret > 0) {
4497 4498
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
4499
		ha->flags.msi_enabled = 1;
4500
	} else
4501
		ql_log(ql_log_warn, vha, 0x0039,
4502
		    "Falling back-to INTa mode -- ret=%d.\n", ret);
4503
skip_msi:
4504 4505 4506 4507 4508

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

4509
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4510 4511
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
4512
	if (ret) {
4513
		ql_log(ql_log_warn, vha, 0x003a,
4514 4515
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
4516
		goto fail;
4517
	} else if (!ha->flags.msi_enabled) {
4518 4519
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
4520
		ha->flags.mr_intr_valid = 1;
4521 4522
		/* Set max_qpair to 0, as MSI-X and MSI in not enabled */
		ha->max_qpairs = 0;
4523
	}
4524

4525
clear_risc_ints:
4526 4527
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;
4528

4529
	spin_lock_irq(&ha->hardware_lock);
4530
	wrt_reg_word(&reg->isp.semaphore, 0);
4531
	spin_unlock_irq(&ha->hardware_lock);
4532

4533
fail:
4534 4535 4536 4537
	return ret;
}

void
4538
qla2x00_free_irqs(scsi_qla_host_t *vha)
4539
{
4540
	struct qla_hw_data *ha = vha->hw;
4541
	struct rsp_que *rsp;
4542 4543
	struct qla_msix_entry *qentry;
	int i;
4544 4545 4546 4547 4548 4549

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4550
		goto free_irqs;
4551
	rsp = ha->rsp_q_map[0];
4552

4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568
	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
			"Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}
4569

4570
free_irqs:
4571
	pci_free_irq_vectors(ha->pdev);
4572
}
4573

4574 4575
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
4576
{
4577
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4578
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4579 4580
	int ret;

4581 4582 4583
	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4584
	if (ret) {
4585 4586 4587
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
4588 4589 4590
		return ret;
	}
	msix->have_irq = 1;
4591
	msix->handle = qpair;
4592 4593
	return ret;
}
反馈
建议
客服 返回
顶部