/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
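
/*
 * Layout of the 8-byte T10 DIF tuple that travels with each logical
 * block when protection information is enabled; all fields are big
 * endian on the wire.
 */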

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}


	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
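	/* Stub: unconditionally reports guard and ref checking as enabled. */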
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
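		/*
		 * Illustrative example (values assumed): with bucket_base = 0
		 * and bucket_step = 50, a 120 ms latency maps to index
		 * (120 + 50 - 1 - 0) / 50 = 3 in integer arithmetic, i.e.
		 * ceil((latency - base) / step).
		 */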
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices
 * on each vport
 * associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
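				/*
				 * Shrink the depth in proportion to the error
				 * rate; e.g. (illustrative numbers) depth 32
				 * with 10 errors and 30 successes becomes
				 * 32 - (32 * 10 / 40) = 24. If the scaled cut
				 * rounds to zero, just step the depth down by
				 * one.
				 */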
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);
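
		/*
		 * Single DMA allocation per command, laid out as
		 *   [fcp_cmnd][fcp_rsp][BPL entries...]
		 * so the BPL begins right after the two fixed structs.
		 */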

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
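	/*
	 * First look for the aborted XRI on the abort-pending SCSI buffer
	 * list; if it is not found there, fall back to scanning the iotag
	 * lookup table for an outstanding FCP iocb using that XRI.
	 */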
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_sblist passed in
 * must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
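	/*
	 * The loop below groups buffers into blocks of contiguous XRIs;
	 * a hole in the XRI sequence closes the current block. Illustrative
	 * example: buffers with XRIs 10, 11, 12 and 14 yield a block post
	 * of {10, 11, 12} when the hole at 13 is seen, and 14 starts the
	 * next block.
	 */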

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/*
		 * 4K Page alignment is CRITICAL to BlockGuard, double check
		 * to be sure.
		 */
		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}


		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocated IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));
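		/*
		 * Note: the SLI-4 layout differs from SLI-3; the SGL comes
		 * first in the DMA buffer, followed by fcp_cmnd and fcp_rsp:
		 *   [SGL entries...][fcp_cmnd][fcp_rsp]
		 */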

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, while bpl entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
938 939
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
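		/*
		 * Skip buffers whose XRI still has an active RRQ against
		 * this node; such XRIs must rest for RA_TOV after an abort
		 * before they can be reused.
		 */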
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock(&phba->scsi_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
1110
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1111 1112 1113
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1114 1115 1116
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the BDEs. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
1216 1217 1218
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
已提交
1219
		 */
1220
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
已提交
1221
			physaddr = sg_dma_address(sgel);
1222
			if (phba->sli_rev == 3 &&
1223
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1224
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
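
/*
 * These flags are or'ed together in the return value; e.g. a reftag
 * error injected on a write returns (BG_ERR_TGT | BG_ERR_CHECK), telling
 * the caller that the target should detect the error and that
 * guard/ref/app checking must be disabled for the command.
 */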

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
					LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1609
				rc = BG_ERR_INIT;
1610 1611

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1612
					"0814 BLKGRD: Injecting apptag error: "
1613
					"read lba x%lx\n", (unsigned long)lba);
1614
				break;
1615 1616 1617 1618
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}
	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
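
/*
 * Summary of the opcode selection above (illustrative only, derived
 * from the two switch statements in lpfc_sc_to_bg_opcodes()):
 *
 * Guard is IP checksum (lpfc_cmd_guard_csum() != 0):
 *   READ_INSERT/WRITE_STRIP  rx IN_NODIF_OUT_CSUM  tx IN_CSUM_OUT_NODIF
 *   READ_STRIP/WRITE_INSERT  rx IN_CRC_OUT_NODIF   tx IN_NODIF_OUT_CRC
 *   READ_PASS/WRITE_PASS     rx IN_CRC_OUT_CSUM    tx IN_CSUM_OUT_CRC
 *
 * Guard is T10 CRC (default):
 *   READ_STRIP/WRITE_INSERT  rx IN_CRC_OUT_NODIF   tx IN_NODIF_OUT_CRC
 *   READ_PASS/WRITE_PASS     rx IN_CRC_OUT_CRC     tx IN_CRC_OUT_CRC
 *   READ_INSERT/WRITE_STRIP  rx IN_NODIF_OUT_CRC   tx IN_CRC_OUT_NODIF
 */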

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif
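
/*
 * Usage sketch for the two helpers above (illustrative pseudo-call
 * sequence, not additional driver logic). When error injection asks
 * for a guard error, callers re-derive the opcodes so the guard
 * computed for the wire no longer matches what the other end expects:
 *
 *	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
 *	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 *	if (rc & BG_ERR_SWAP)
 *		lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
 *	if (rc & BG_ERR_CHECK)
 *		checking = 0;	(HBA checking disabled for this IO)
 */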

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
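
/*
 * Worked example (illustrative): for a command whose data maps to N
 * scatter/gather segments, the BPL built above always contains
 *
 *	num_bde = 2 + N		(one PDE5, one PDE6, N data BDEs)
 *
 * so a 3-segment transfer yields 5 BDEs; this is why the caller
 * treats fewer than 2 entries as an error.
 */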

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
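
/*
 * Worked example of the 4K boundary split above (illustrative
 * numbers): with 512-byte blocks, a 1024-byte protection segment
 * describes 1024 / 8 = 128 blocks, i.e. protgrp_bytes = 64K of data.
 * If that segment starts at offset 0xf80 within a 4K page, only
 * 0x1000 - 0xf80 = 0x80 bytes (16 tuples, so 16 blocks) fit before
 * the boundary; the remaining tuples are emitted as a new protection
 * group on the next loop iteration via protgroup_offset.
 */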

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
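
/*
 * Layout note (illustrative): each protection group built above costs
 * one DISEED SGE and one DIF SGE ahead of its data SGEs, so a command
 * with P unsplit protection segments and D data segments needs roughly
 * 2 * P + D SGEs; the cfg_total_seg_cnt tests inside the loop guard
 * against exceeding that budget.
 */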
/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}
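
/*
 * Example (illustrative): a WRITE_INSERT carries no protection buffers
 * from the host (the HBA generates the DIF), so it maps to
 * LPFC_PG_TYPE_NO_DIF; a WRITE_PASS carries host-supplied DIF and maps
 * to LPFC_PG_TYPE_DIF_BUF.
 */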

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;

	return fcpdl;
}
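
/*
 * Worked example (illustrative): a 4096-byte transfer with 512-byte
 * logical blocks carries 4096 / 512 = 8 DIF tuples of 8 bytes each,
 * so the adjusted length is
 *
 *	fcpdl = 4096 + 8 * 8 = 4160 bytes
 *
 * which is what actually moves across the wire.
 */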

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
2735
	int fcpdl;
2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;
		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2855
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
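
/*
 * Sizing example for the checks above (illustrative): with
 * cfg_total_seg_cnt = 64, a DIF_BUF command may carry at most
 * (64 - 2) / 4 = 15 protection segments, since the driver budgets a
 * worst case of four BPL entries (PDE5, PDE6, PDE7 and one data BDE)
 * per protection group.
 */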

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * using crc_t10dif.
 */
static uint16_t
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * using ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}
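
/*
 * Usage sketch (illustrative): lpfc_calc_bg_err() below selects the
 * guard helper per command, mirroring lpfc_sc_to_bg_opcodes():
 *
 *	if (lpfc_cmd_guard_csum(cmd))
 *		sum = lpfc_bg_csum(data_src, blksize);
 *	else
 *		sum = lpfc_bg_crc(data_src, blksize);
 *
 * Note the asymmetry: lpfc_bg_crc() byte-swaps the CRC into big-endian
 * wire order itself, while the checksum path compares the value
 * returned by ip_compute_csum() directly against the tuple's guard.
 */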

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag, guard_type;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;
		guard_type = scsi_host_get_guard(cmd->device->host);

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}


/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
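
/*
 * Worked example for the bghm conversion above (illustrative): with
 * 512-byte sectors, a READ_PASS that fails three full blocks into the
 * transfer reports an "on the wire" byte count of
 * bghm = 3 * (512 + 8) = 1560, which the switch statement divides back
 * down to 3; added to scsi_get_lba(), that yields the failing sector
 * placed in the sense-data information descriptor.
 */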

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
	return 0;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the
	 * fcp_cmnd and fcp_rsp regions to the first data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;
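
			/*
			 * Illustrative budget check (numbers assumed, not
			 * driver defaults): with cfg_total_seg_cnt = 64,
			 * two SGEs are reserved for FCP_CMND/FCP_RSP,
			 * leaving 62; at up to 3 SGEs per protection
			 * segment, at most 20 protection segments fit.
			 */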

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
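
	/*
	 * Worked example (illustrative, assuming 512-byte sectors and
	 * 8-byte DIF tuples): a 64-block WRITE_INSERT hands the driver
	 * 64 * 512 bytes of data, but the adjusted fcpdl is
	 * 64 * (512 + 8) bytes because the HBA adds protection data on
	 * the wire; fcpi_parm must carry that wire-side length.
	 */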

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcpi_parm
		 * and there is a valid fcpi_parm, then there is a read_check
		 * error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list of the scsi
 * command held in @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check whether the under run
		 * reported by the storage array matches the under run
		 * reported by the HBA.  If they differ, a frame was
		 * dropped in transit.
		 */
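		/*
		 * Illustrative numbers (not from a real trace): if the
		 * target reports a residual of 0x200 bytes but the HBA
		 * counted fcpi_parm = 0x400 bytes as not transferred,
		 * 0x200 bytes of read data were lost on the fabric, so
		 * the command is failed with DID_ERROR instead of
		 * returning a silently short buffer.
		 */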
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;
	uint32_t logit = LOG_FCP;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exhange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
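			/*
			 * E.g. (numbers illustrative): with cmd_qdepth = 32
			 * and a ramp-up percentage of 5, 32 * 5 / 100
			 * truncates to 1, so the queue depth grows by at
			 * least one slot per LPFC_TGTQ_INTERVAL until it
			 * reaches cfg_tgt_queue_depth.
			 */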
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_lock_irqsave(&phba->hbalock, flags);
		lpfc_cmd->pCmd = NULL;
		spin_unlock_irqrestore(&phba->hbalock, flags);

		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
		break;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is IOCB completion routine for device reset and target reset
 * routine. This routine releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len, link_speed = 0;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			link_speed = lpfc_sli_port_speed_get(phba);
		} else {
			if (phba->sli4_hba.link_state.logical_speed)
				link_speed =
				      phba->sli4_hba.link_state.logical_speed;
			else
				link_speed = phba->sli4_hba.link_state.speed;
		}
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The @done callback is invoked after driver finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));


		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}


/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ring_number, ret_val;
	unsigned long flags, iflags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointig at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
		pring_s4 = &phba->sli.ring[ring_number];
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}


/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task management
 * command succeeded.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;


	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;


		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						"reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						"failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						"LUN\n");
				break;
			}
		}
	}
	return ret;
}
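
/*
 * Note on the return codes quoted in these handlers: 0x2002 and 0x2003
 * correspond to the SCSI midlayer's SUCCESS and FAILED values from
 * <scsi/scsi.h>, so the results can be handed straight back to the
 * error-handler thread.
 */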


/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned tgt_id, uint64_t lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
			 "iocb_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4],
			 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
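
/*
 * Usage sketch (mirrors lpfc_device_reset_handler() below); rdata is
 * assumed to come from the scsi_device being recovered:
 *
 *	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 *	status = lpfc_send_taskmgmt(vport, rdata, cmnd->device->id,
 *				    cmnd->device->lun, FCP_LUN_RESET);
 *	if (status == SUCCESS)
 *		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
 *						     LPFC_CTX_LUN);
 */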

/**
 * lpfc_chk_tgt_mapped - Check if the scsi target (rport) is present and mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}
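
/*
 * Timing sketch, assuming the driver default devloss_tmo of 30 seconds:
 * the loop above polls every 500 ms for up to 2 * 30 * 1000 ms = 60 s
 * before declaring the target non-existent.
 */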

/**
 * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any outstanding
 * contexts, then waits for their completions. The wait is bounded
 * by devloss_tmo.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0798 Device Reset rport failure: rdata x%p\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o as it may be orphaned by the TMF,
	 * or, if the TMF failed, it may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		if (pnode) {
			spin_lock_irq(shost->host_lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o as it may be orphaned by the TMF,
	 * or, if the TMF failed, it may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the i/o as it may be orphaned by the TMFs
	 * above; or, if any of the TMFs failed, it may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
 * all outstanding SCSI commands to the host, with the errors returned
 * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
 * for error handling, the routine returns error only if resetting the
 * adapter is unsuccessful; in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	rc = lpfc_online(phba);
	if (rc)
		ret = FAILED;
	lpfc_unblock_mgmt_io(phba);

	if (ret == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "3323 Failed host reset, bring it offline\n");
		lpfc_sli4_offline_eratt(phba);
	}
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates lun_queue_depth + 2 scsi_bufs into this host's
 * globally available list of scsi buffers, making sure no more scsi buffers
 * are allocated than the HBA limit conveyed to the midlayer. This list of
 * scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun, true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
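
/*
 * Worked example with assumed values: with the default cfg_lun_queue_depth
 * of 30, each new scsi_device requests num_to_alloc = 30 + 2 = 32 buffers.
 * If cfg_hba_queue_depth were 512, allocation is capped once the total
 * approaches 512 - LPFC_DISC_IOCB_BUFF_COUNT, keeping a few exchanges
 * free so discovery can always complete.
 */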

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		  GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data  = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->available = false;
	return lun_info;
}

/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}
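
/*
 * Locking sketch (mirrors lpfc_slave_alloc() above): callers must hold
 * phba->devicelock across the lookup, e.g.:
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	device_data = __lpfc_get_device_data(phba, &phba->luns,
 *					     &vport->fc_portname,
 *					     &target_wwpn, sdev->lun);
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */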

/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   false - Error or no lun found
 *   true - Success, lun found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for the lun or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
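
/*
 * Iteration sketch: per the description above, a caller can walk every
 * OAS-enabled lun by starting at FIND_FIRST_OAS_LUN with zeroed wwpns as
 * wildcards and feeding the returned "next" lun back in; the routine
 * returns false once *starting_lun has been set to NO_MORE_OAS_LUN.
 * consume() is a hypothetical placeholder for the caller's own handling:
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
 *				      &found_vport_wwpn, &found_target_wwpn,
 *				      &found_lun, &found_lun_status)) {
 *		consume(found_lun);	(lun now holds the next lun to pass)
 *	}
 */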

/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine enables a lun for OAS operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and add to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create an lun info structure and add to list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   false);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine disables a lun for OAS operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available to the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}
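
/*
 * Usage sketch pairing the two routines; the wwpn/lun variables are
 * assumed to be caller-supplied:
 *
 *	if (lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun))
 *		(lun is now tracked on phba->luns with oas_enabled set)
 *	...
 *	lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun);
 */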

struct scsi_host_template lpfc_template_s3 = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};