/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct req_que **, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block (SRB) for the command
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
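
/*
 * Worked example (illustrative only, not part of the original source): a
 * Command Type 2 IOCB carries up to 3 DSDs and each Continuation Type 0 IOCB
 * adds up to 7 more, so qla2x00_calc_iocbs_32(17) returns 1 + (14 / 7) = 3
 * request entries (3 + 7 + 7 = 17 segments).  The 64-bit and 24xx variants
 * below follow the same pattern with 2 + 5n and 1 + 5n DSDs per entry set,
 * respectively.
 */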

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
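
/*
 * Note (added for clarity, not in the original source): each DSD written by
 * the 32-bit builder above is an (address, length) pair, while the 64-bit
 * builder below emits (address low, address high, length) triples; that is
 * why fewer DSDs fit per fixed-size request entry (3 in the command IOCB and
 * 7 per continuation here, versus 2 and 5 in the 64-bit path).
 */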

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
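	/*
	 * Illustrative numbers (not from the original source): with a request
	 * ring of length 128, ring_index == 100 and a hardware out pointer
	 * cnt == 20, the computation above yields req->cnt = 128 - (100 - 20)
	 * = 48 free entries; queuing proceeds only while at least
	 * req_cnt + 2 of them remain.
	 */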
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->que = req;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
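
/*
 * Usage sketch (this is the same call made from qla2x00_start_scsi() above):
 * when vha->marker_needed is set, the driver issues
 *
 *	qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 *
 * which takes the hardware lock and queues a MK_SYNC_ALL marker IOCB ahead
 * of any further SCSI commands.
 */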

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if  (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: SCSI driver HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}

}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}


/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &req, &rsp);
	sp->que = req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct req_que **req,
	struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ql2xmultique_tag && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1) {
		*rsp = ha->rsp_q_map[affinity + 1];
		*req = ha->req_q_map[1];
	} else {
		*req = vha->req;
		*rsp = ha->rsp_q_map[0];
	}
}
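
/*
 * Illustrative mapping (assumed values, not from the original source): with
 * ql2xmultique_tag enabled and ha->max_rsp_queues == 4, a command whose
 * block-layer request is tagged for CPU 2 (cmd->request->cpu == 2) is paired
 * with rsp_q_map[3] while its request goes on req_q_map[1]; in all other
 * cases the command uses the base vha->req / rsp_q_map[0] pair.
 */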