/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};
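
/*
 * This mirrors the 8-byte T10 DIF protection tuple that accompanies each
 * logical block. As an illustrative example, for a Type 1 formatted
 * 512-byte sector at LBA 0x1234, the tuple carries:
 *   guard_tag = CRC-16 of the 512 data bytes (or an IP checksum for DIX)
 *   app_tag   = opaque to the transport (0x0000 unless an application sets it)
 *   ref_tag   = 0x00001234 (the low 32 bits of the LBA)
 */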

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
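
/*
 * When lpfc_cmd_guard_csum() returns 1, the guard tags in host memory are
 * IP checksums (DIX), so the BlockGuard setup built later must ask the HBA
 * to translate between IP checksum guards in memory and T10 CRC guards on
 * the wire (the CSUM<-->CRC swap referenced in the error-injection code).
 */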

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
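
/*
 * The "sgl += 1" above works because lpfc_new_scsi_buf_s4() lays out each
 * buffer's SGL as sge[0] = FCP CMND and sge[1] = FCP RSP; a task management
 * command carries no data, so the response sge is the last entry.
 */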

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
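
/*
 * Worked example of the bucket math above (values are illustrative, not
 * defaults): with LPFC_LINEAR_BUCKET, bucket_base = 0 and bucket_step = 50,
 * a 120 ms completion maps to i = (120 + 50 - 1 - 0) / 50 = 3.  With the
 * power-of-2 bucketing, the same 120 ms latency lands in the first bucket i
 * where 120 <= base + (1 << i) * step, i.e. i = 2 (0 + 4 * 50 = 200).
 */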

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
static int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;

	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		/* change request from sysfs, fall through */
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		if (scsi_track_queue_full(sdev, qdepth) == 0)
			return sdev->queue_depth;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0711 detected queue full - lun queue "
				 "depth adjusted to %d.\n", sdev->queue_depth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for every scsi device
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
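
/*
 * Example of the ramp-down arithmetic above (illustrative numbers): with
 * queue_depth = 32, num_rsrc_err = 10 and num_cmd_success = 30, the cut is
 * 32 * 10 / 40 = 8, so the new depth becomes 32 - 8 = 24.  If the cut
 * rounds down to 0, the depth is still reduced by one so that repeated
 * resource errors always make forward progress toward a smaller queue.
 */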

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for a device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
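
/*
 * Sketch of the DMA buffer each SLI-3 scsi buffer carves out of
 * lpfc_scsi_dma_buf_pool (offsets follow the pointer math above):
 *
 *   +----------+---------+---------------------------------------+
 *   | fcp_cmnd | fcp_rsp | BPL: bde[0]=CMND, bde[1]=RSP, then    |
 *   |          |         | one data bde per sg segment           |
 *   +----------+---------+---------------------------------------+
 *   ^psb->data ^+sizeof(fcp_cmnd)  ^+sizeof(fcp_cmnd)+sizeof(fcp_rsp)
 */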

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it uses
 * the embedded SGL post mailbox command for posting. The @post_sblist passed
 * in must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put the posted SCSI buffer-sgls on the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
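
/*
 * Illustration of the blocking logic above (XRI values are made up): for
 * buffers holding XRIs 100, 101, 102, 105, the first hole (102 -> 105)
 * flushes {100, 101, 102} as one non-embedded SGL block post, and 105,
 * arriving last with block_cnt == 1, is posted by itself through the
 * embedded lpfc_sli4_post_sgl() path.
 */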

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers that need to be reposted to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/*
		 * 4K Page alignment is CRITICAL to BlockGuard, double check
		 * to be sure.
		 */
		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return  lpfc_cmd;
}
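
/*
 * Allocation and release deliberately use two lists guarded by separate
 * locks: getters drain lpfc_scsi_buf_list_get while completions append to
 * lpfc_scsi_buf_list_put, so the two paths only contend when the get list
 * runs dry and the put list is spliced over in one shot (as above).
 */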
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock(&phba->scsi_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the BDEs. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
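
/*
 * Decision sketch for the SLI-3 data placement above: with
 * LPFC_EXT_DATA_BDE_COUNT == 3 (the extended-IOCB BDE budget noted in the
 * comment in the code), a 2-segment non-BlockGuard I/O gets both data BDEs
 * embedded in the IOCB (ebde_count = num_bde + 1 = 3, counting the response
 * bde), while a 4-segment I/O overflows into the BPL and the first data bde
 * becomes a BLP_64 pointer to it (ebde_count = 2).
 */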

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
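
/*
 * The masks combine; e.g. lpfc_bg_err_inject() returns
 * (BG_ERR_TGT | BG_ERR_CHECK) when a corrupted tag should go out on the
 * wire with checking disabled locally so the Target detects it, and
 * (BG_ERR_INIT | BG_ERR_SWAP) when the corruption is applied on the host
 * side and the caller must also swap the CRC/CSUM guard method.
 */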

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
					LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
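
/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * resolve the BlockGuard opcodes for a command before programming them into
 * a PDE6 or DISEED descriptor.  The wrapper name and the LPFC_BG_EXAMPLES
 * guard are hypothetical.
 */
#ifdef LPFC_BG_EXAMPLES
static int
lpfc_bg_opcode_example(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	uint8_t txop = 0, rxop = 0;

	/* Fails only for unsupported op/guard combinations */
	if (lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop))
		return 1;

	/* txop/rxop now name the BG_OP_* conversions for each direction */
	return 0;
}
#endif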

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Recompute the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
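
/*
 * Worked example for lpfc_bg_setup_bpl() (illustrative): a WRITE_INSERT
 * command with two dma-mapped data segments yields PDE5 + PDE6 + two data
 * BDEs, so the function returns num_bde = 4.
 */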

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
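		/*
		 * Worked example (illustrative): a 1024-byte protection
		 * buffer whose PDE7 address starts at offset 0xf80 of a 4K
		 * region crosses the boundary; protgroup_remainder =
		 * 0x1000 - 0xf80 = 0x80 bytes = 16 DIF tuples, so this prot
		 * group covers 16 blocks and the remainder is carried into
		 * the next loop pass via protgroup_offset.
		 */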
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
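
/*
 * Worked example for lpfc_bg_setup_bpl_prot() (illustrative): a 32-block
 * write of 512-byte blocks with one 256-byte protection segment (32 DIF
 * tuples) that does not cross a 4K boundary forms a single protection
 * group: PDE5 + PDE6 + PDE7 plus the data BDEs covering 32 * 512 bytes.
 */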

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
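
/*
 * Worked example for lpfc_bg_setup_sgl() (illustrative): a WRITE_INSERT
 * command with two dma-mapped data segments yields one DISEED SGE plus two
 * data SGEs, so the function returns num_sge = 3.
 */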

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}

/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
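	/*
	 * Worked example (illustrative): a 4096-byte request with 512-byte
	 * logical blocks spans 8 blocks, so 8 * 8 = 64 bytes of DIF are
	 * added and fcpdl becomes 4096 + 64 = 4160.
	 */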

	return fcpdl;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;
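			/*
			 * (Sizing note, illustrative: each protection group
			 * consumes PDE5 + PDE6 + PDE7 plus at least one data
			 * BDE, hence the factor of 4 above.)
			 */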

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * (crc_t10dif).
 */
static uint16_t
2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * (ip_compute_csum).
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}
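
/*
 * Illustrative sketch only: computing both guard tag flavors over a single
 * 512-byte block.  The function name and the LPFC_BG_EXAMPLES guard are
 * hypothetical; which flavor a command actually uses depends on
 * lpfc_cmd_guard_csum().
 */
#ifdef LPFC_BG_EXAMPLES
static void
lpfc_bg_guard_example(uint8_t *block)
{
	uint16_t crc  = lpfc_bg_crc(block, 512);  /* T10-DIF CRC guard */
	uint16_t csum = lpfc_bg_csum(block, 512); /* IP checksum guard */

	pr_info("guard tags: crc=0x%04x csum=0x%04x\n", crc, csum);
}
#endif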

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag, guard_type;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;
		guard_type = scsi_host_get_guard(cmd->device->host);

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
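				/*
				 * (Per the T10-DIF convention, an all-ones
				 * ref tag or app tag marks the tuple as
				 * "do not check", so it is skipped here.)
				 */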
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}


				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if (guard_tag != sum) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}


/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
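		/*
		 * Example (illustrative): with 512-byte sectors, the
		 * READ_STRIP/WRITE_INSERT/PASS cases below divide by 520
		 * (sector + 8-byte DIF tuple) because protection data
		 * travels with each block on the wire there, while
		 * READ_INSERT/WRITE_STRIP divide by 512 since no
		 * protection data is on the wire for those ops.
		 */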
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
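		/*
		 * Design note: duplicating the first data SGE as an inline
		 * BDE in the IOCB lets a PERFH-capable SLI-4 port begin the
		 * first data transfer without first fetching the external
		 * SGL, trimming one memory round trip from each I/O.
		 */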
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized since all iocb memory resources are
	 * reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
	return 0;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the
	 * fcp_cmnd and fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;
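			/*
			 * Example budget check (illustrative value): with a
			 * cfg_total_seg_cnt of 64, two SGEs are reserved for
			 * fcp_cmnd/fcp_rsp, so at most (64 - 2) / 3 = 20
			 * protection segments fit before the I/O is rejected.
			 */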

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}
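	/*
	 * The flags chosen above mirror the generic SCSI protection ops:
	 * INSERT means the HBA must generate protection data that one side
	 * lacks, STRIP means it must remove protection data in flight, and
	 * PASS means protection data is carried (and checked) end to end.
	 */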

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
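	/*
	 * Worked example (assuming 512-byte sectors with 8-byte tuples on
	 * the wire): a 4096-byte host transfer covers 8 sectors, so the
	 * adjusted fcpdl above becomes 4096 + 8 * 8 = 4160 bytes.
	 */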

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA"
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is a queue full or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
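	/* ScsiResult() simply packs the two bytes the midlayer expects:
	 * (host_status << 16) | scsi_status.
	 */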
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;
	uint32_t logit = LOG_FCP;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exhange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}
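	/*
	 * The queue depth management above is deliberately asymmetric:
	 * slow commands pull cmd_qdepth straight down to the number of
	 * commands in flight, while recovery adds only
	 * LPFC_TGTQ_RAMPUP_PCENT percent (at least one slot) per
	 * LPFC_TGTQ_INTERVAL until the configured ceiling is reached.
	 */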

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_lock_irqsave(&phba->hbalock, flags);
		lpfc_cmd->pCmd = NULL;
		spin_unlock_irqrestore(&phba->hbalock, flags);

		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
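/*
 * Sketch of the transform above on a little-endian host: the CDB bytes
 * 28 00 12 34 are read back as the uint32_t 0x34120028 and re-stored by
 * cpu_to_be32() as the byte sequence 34 12 00 28, i.e. each 32-bit word
 * of the immediate command area ends up big endian. On big-endian hosts
 * cpu_to_be32() is a no-op and the loop degenerates to a straight copy.
 */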

/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
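			/*
			 * Example: with cfg_first_burst_size set to 2048, an
			 * 8192-byte write gets fcpi_XRdy = 2048, while a
			 * 512-byte write gets fcpi_XRdy = 512 (the whole
			 * transfer may go out as the unsolicited first burst).
			 */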
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
		break;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
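/*
 * Usage sketch (hypothetical call site): during HBA setup the driver picks
 * the jump table once, e.g. lpfc_scsi_api_table_setup(phba, LPFC_PCI_DEV_OC)
 * for an SLI-4 port, and the hot path afterwards dispatches only through
 * the phba->lpfc_* pointers instead of branching on the SLI revision.
 */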

/**
 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is IOCB completion routine for device reset and target reset
 * routine. This routine release scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len, link_speed = 0;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			link_speed = lpfc_sli_port_speed_get(phba);
		} else {
			if (phba->sli4_hba.link_state.logical_speed)
				link_speed =
				      phba->sli4_hba.link_state.logical_speed;
			else
				link_speed = phba->sli4_hba.link_state.speed;
		}
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Pointer to lpfc_hba data structure.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is
 * enabled and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to Scsi_Host data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from the scsi command and provides it to firmware.
 * The scsi_done callback is invoked after the driver finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));


		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
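	/*
	 * Note: in polled mode the fast-ring handler is run inline here so
	 * that completions already sitting on the ring are reaped at once
	 * rather than waiting for the next fcp_poll timer expiry.
	 */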

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}


/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ring_number, ret_val;
	unsigned long flags, iflags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
4738
	 * see the completion before the eh fired. Just return SUCCESS.
4739
	 */
4740 4741 4742 4743 4744 4745
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
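	/*
	 * ABORT_XRI_CN asks the port to send an ABTS on the wire, which is
	 * only meaningful while the link is up; CLOSE_XRI_CN closes the
	 * exchange locally without generating any on-wire traffic.
	 */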

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
		pring_s4 = &phba->sli.ring[ring_number];
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abortng I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}


/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task management
 * command succeeded.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;


	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;


		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

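		/*
		 * A valid RSP_LEN with an 8-byte response field means the
		 * target returned FCP_RSP_INFO; byte 3 of that field holds
		 * the task management response code decoded below.
		 */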
		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						"reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						"failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						"LUN\n");
				break;
			}
		}
	}
	return ret;
}


/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned tgt_id, uint64_t lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

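	/*
	 * Issue the TMF synchronously: lpfc_sli_issue_iocb_wait() returns
	 * when the response IOCB arrives or lpfc_cmd->timeout expires.
	 */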
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
			 "iocb_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4],
			 iocbq->iocb_flag);
		/* If status == IOCB_SUCCESS here, the failure was reported
		 * by the FCP RSP (ulpStatus != IOSTAT_SUCCESS). */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - wait for the scsi target (rport) to become mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - abort and flush outstanding I/O contexts
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					tgt_id, lun_id, context);
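	/*
	 * Poll every 20 ms until the aborted I/O drains or the wait,
	 * bounded by twice the devloss timeout, expires.
	 */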
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
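	/*
	 * fc_block_scsi_eh() waits out a blocked rport state; it returns
	 * 0 once the rport is usable again, or FAST_IO_FAIL if the
	 * fast_io_fail timer fired first. Anything other than 0 or
	 * SUCCESS is returned to the midlayer.
	 */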
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the I/O: it may be orphaned by the TMF,
	 * or left in an indeterminate state if the TMF failed.
	 * So, continue on.
	 * We will report success only if all of the I/O aborts cleanly.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		spin_lock_irq(shost->host_lock);
		pnode->nlp_flag &= ~NLP_NPR_ADISC;
		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
		spin_unlock_irq(shost->host_lock);
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the I/O: it may be orphaned by the TMF,
	 * or left in an indeterminate state if the TMF failed.
	 * So, continue on.
	 * We will report success only if all of the I/O aborts cleanly.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the I/O: it may be orphaned by the TMFs
	 * above, or left in an indeterminate state if any of the TMFs
	 * failed.
	 * We will report success only if all of the I/O aborts cleanly.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * lpfc_offline calls lpfc_sli_hba_down, which aborts or locally rejects
 * all outstanding SCSI commands to the host and returns their errors to
 * the SCSI mid-level. As this is the SCSI mid-level's last resort for
 * error handling, the routine returns an error only if resetting the
 * adapter fails; in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

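	/* Quiesce the port, restart the board, then bring it back online. */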
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	rc = lpfc_online(phba);
	if (rc)
		ret = FAILED;
	lpfc_unblock_mgmt_io(phba);

	if (ret == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "3323 Failed host reset, bring it offline\n");
		lpfc_sli4_offline_eratt(phba);
	}
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. It also makes sure no more scsi
 * buffers are allocated than the HBA limit conveyed to the midlayer. This
 * list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun, true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_adjust_queue_depth(sdev, vport->cfg_lun_queue_depth);

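	/*
	 * In polled mode, service the FCP ring once now and re-arm the
	 * poll timer if ring interrupts are disabled.
	 */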
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
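	/*
	 * For OAS (fof), keep the lun's device data only while OAS is
	 * still enabled on it; otherwise free it now.
	 */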
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		  GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available by the system,
 * and pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data  = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->available = false;
	return lun_info;
}

/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info  ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Point to list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}

/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - lun found
 *   false - lun not found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for the lun, or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}

/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine enables a lun for OAS operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and add to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create an lun info structure and add to list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   false);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine disables a lun for OAS operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available by the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= scsi_change_queue_type,
	.use_blk_tags		= 1,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= scsi_change_queue_type,
	.use_blk_tags		= 1,
};