/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
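			/*
			 * Per the range checks below, mailbox values
			 * 0x4000-0x7fff signal a completed mailbox command
			 * and 0x8000-0xbfff an asynchronous event; anything
			 * else is unexpected here.
			 */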
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
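	/*
	 * Each remaining bit of the in_mb mask selects a mailbox register
	 * the caller asked for; mailbox 0 was stored above, so the mask is
	 * shifted before walking the rest in the loop below.
	 */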
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}

#define LS_UNKNOWN	2
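/*
 * Translate the firmware link_data_rate into a printable speed string:
 * per the table below, indices 0-5 map to "1", "2", "?", "4", "8" and
 * "16" Gbps, the CNA-reported value 0x13 maps to "10", and anything else
 * falls back to the "?" (LS_UNKNOWN) entry.
 */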
char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha)
{
	static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
	char *link_speed;
	int fw_speed = ha->link_data_rate;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		link_speed = link_speeds[0];
	else if (fw_speed == 0x13)
		link_speed = link_speeds[6];
	else {
		link_speed = link_speeds[LS_UNKNOWN];
		if (fw_speed < 6)
			link_speed =
			    link_speeds[fw_speed];
	}

	return link_speed;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
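	/*
	 * RIO (reduced interrupt operation) events pack up to five
	 * outstanding-command handles into mailboxes 1-3, 6 and 7; the
	 * switch below unpacks them and rewrites mb[0] as
	 * MBA_SCSI_COMPLETION so they flow through the normal fast-post
	 * completion path.
	 */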
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n",  mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");

		break;
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t* fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
				(uint8_t *)pkt, sizeof(*pkt));
	}
	else {
		res =  DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

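/*
 * Copy firmware-reported sense data into the midlayer's sense buffer
 * (capped at SCSI_SENSE_BUFFERSIZE).  If more sense bytes were reported
 * than fit in this status IOCB, rsp->status_srb remembers the command so
 * that subsequent status continuation entries can copy the remainder.
 */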
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;       /* Checksum */
	__be16 app_tag;         /* APPL identifier */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected the error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

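	/*
	 * A real DIF mismatch: the checks below report a check condition
	 * with sense key ILLEGAL_REQUEST and the additional sense codes
	 * used for T10 protection-information failures -- 0x10/0x1 for a
	 * guard check, 0x10/0x3 for a reference tag check and 0x10/0x2 for
	 * an application tag check.
	 */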
	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
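	/*
	 * The status IOCB handle packs the request-queue number in its
	 * upper 16 bits and the outstanding-command index in its lower
	 * 16 bits.
	 */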
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}
	/* Validate handle. */
1598 1599 1600
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

1673 1674 1675 1676 1677
	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

L
Linus Torvalds 已提交
1678 1679 1680 1681 1682
	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
1683
	case CS_QUEUE_FULL:
L
Linus Torvalds 已提交
1684
		if (scsi_status == 0) {
1685
			res = DID_OK << 16;
L
Linus Torvalds 已提交
1686 1687 1688
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1689
			resid = resid_len;
1690
			scsi_set_resid(cp, resid);
1691 1692

			if (!lscsi_status &&
1693
			    ((unsigned)(scsi_bufflen(cp) - resid) <
1694
			     cp->underflow)) {
1695
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
1696
				    "Mid-layer underflow "
1697
				    "detected (0x%x of 0x%x bytes).\n",
1698
				    resid, scsi_bufflen(cp));
1699

1700
				res = DID_ERROR << 16;
1701 1702
				break;
			}
L
Linus Torvalds 已提交
1703
		}
1704
		res = DID_OK << 16 | lscsi_status;
L
Linus Torvalds 已提交
1705

1706
		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1707
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
1708
			    "QUEUE FULL detected.\n");
1709 1710
			break;
		}
1711
		logit = 0;
L
Linus Torvalds 已提交
1712 1713 1714
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

1715
		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
L
Linus Torvalds 已提交
1716 1717 1718
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

1719
		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1720
		    rsp, res);
L
Linus Torvalds 已提交
1721 1722 1723
		break;

	case CS_DATA_UNDERRUN:
1724
		/* Use F/W calculated residual length. */
1725 1726 1727 1728
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
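			/*
			 * A firmware-reported residual that disagrees with the
			 * target-reported one means frames were dropped in
			 * transit, so the data cannot be trusted.
			 */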
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * SCSI statuses of task set full and busy are
			 * considered to be task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non-zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Handles extended sense data that did not fit into the original status
 * IOCB; rsp->status_srb tracks the command whose sense data is still
 * being collected.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		sense_ptr, sense_sz);

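	/*
	 * Remember how much sense data is still outstanding so the next
	 * continuation entry (if any) resumes where this one stopped.
	 */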
	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

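	/* The handle's high word selects the request queue; validate it. */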
	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Determine which mailbox registers need to be read back. */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
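	/*
	 * Mailbox 0 was passed in by the caller; read back only the
	 * registers the pending command asked for (ha->mcp->in_mb), or all
	 * of them when no command context is available.
	 */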
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue to service
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

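	/*
	 * Entries already handled are stamped with RESPONSE_PROCESSED, so
	 * the loop stops at the first slot the firmware has not refilled.
	 */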
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			(void)qlt_24xx_process_response_error(vha, pkt);

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case; this label only keeps
			 * marker entries from falling into the default case.
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
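/*
 * Called from the RISC-paused paths before the firmware dump is taken:
 * select register window 0x7C00 and, if BIT_3 of iobase_c8 is latched,
 * log that the additional 0x55AA error code is present.
 */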
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for ISP24xx and newer ISPs.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
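	/*
	 * Each pass decodes the low byte of the host status register:
	 * 0x1/0x2/0x10/0x11 are mailbox completions, 0x12 is an asynchronous
	 * event, 0x13/0x14 are response-queue updates, and 0x1C/0x1D signal
	 * ATIO queue work.
	 */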
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case 0x1C: /* ATIO queue updated */
			qlt_24xx_process_atio_queue(vha);
			break;
		case 0x1D: /* ATIO and response queues updated */
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

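/*
 * MSI-X handler for the additional (multiqueue) response queues: optionally
 * acknowledge the interrupt, then defer the actual response processing to
 * the queue's work item via queue_work_on().
 */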
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case 0x1C: /* ATIO queue updated */
			qlt_24xx_process_atio_queue(vha);
			break;
		case 0x1D: /* ATIO and response queues updated */
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

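/*
 * Entries 0 and 1 are requested for the base queues in
 * qla24xx_enable_msix(); entry 2 is wired up by qla25xx_request_irq()
 * when additional response queues are created.
 */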
static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

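	/*
	 * pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; as long as that count still meets
	 * MIN_MSIX_COUNT, retry with the smaller allocation.
	 */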
	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d. Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		(ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
			ha->pdev->subsystem_vendor,
			ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}

clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
	if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
		goto fail;
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}


int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}