qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct req_que **, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
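 *
 * For example, 17 DSDs fit in one Command Type 2 IOCB (3 DSDs) plus two
 * Continuation Type 0 IOCBs (7 DSDs each), so three IOCB entries are needed.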
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
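 *
 * For example, 12 DSDs fit in one Command Type 3 IOCB (2 DSDs) plus two
 * Continuation Type 1 IOCBs (5 DSDs each), so three IOCB entries are needed.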
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
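 *
 * Each 32-bit data segment descriptor (DSD) is loaded as two little-endian
 * words: the DMA address followed by the segment length.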
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
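 *
 * Each 64-bit data segment descriptor (DSD) is loaded as three little-endian
 * words: the low and high halves of the DMA address followed by the length.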
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
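 *
 * With the MK_SYNC_ALL modifier the loop_id and lun arguments are ignored.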
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
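 * The routine polls for ring space, briefly dropping and re-acquiring the
 * hardware lock between attempts.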
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
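 *
 * The ring index is advanced, wrapping at the end of the ring, and the new
 * value is written to the chip's request-queue in pointer.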
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}

}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
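 *
 * For example, 11 DSDs fit in the command IOCB (1 DSD) plus two
 * Continuation Type 1 IOCBs (5 DSDs each), so three IOCB entries are needed.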
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
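 *
 * The command IOCB carries a single DSD; additional segments spill into
 * Continuation Type 1 IOCBs, three little-endian words per DSD.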
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}


/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &req, &rsp);

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

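/*
 * Select the request/response queue pair for a command.  When multiqueue
 * tagging (ql2xmultique_tag) is enabled, the response queue is picked from
 * the CPU that submitted the request so the completion is handled on the
 * same CPU; otherwise the default queues are used.
 */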
static void qla25xx_set_que(srb_t *sp, struct req_que **req,
	struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ql2xmultique_tag && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1) {
		*rsp = ha->rsp_q_map[affinity + 1];
		*req = ha->req_q_map[1];
	} else {
		*req = vha->req;
		*rsp = ha->rsp_q_map[0];
	}
}