/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

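	/*
	 * Pick a latency histogram bucket: linear buckets are bucket_step
	 * msecs wide starting at bucket_base; otherwise the bucket upper
	 * bounds grow as bucket_base + (1 << i) * bucket_step.
	 */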
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
 * thread. This routine reduces queue depth for all scsi devices on each vport
 * associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
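				/*
				 * Scale the depth down by the fraction of
				 * recent commands that hit a resource error:
				 * new = old - old * err / (err + success),
				 * dropping at least one slot.
				 */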
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() routine. This function is invoked
 * with EEH when device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
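	/*
	 * The XRI was not on the aborted-buffer list; scan the active iotag
	 * table for an outstanding FCP command still holding this XRI and
	 * clear its exchange-busy flag.
	 */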
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_sblist passed in
 * must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/*
		 * 4K Page alignment is CRITICAL to BlockGuard, double check
		 * to be sure.
		 */
		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
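		/*
		 * Skip buffers whose XRI is still within the RRQ window for
		 * this node; that exchange cannot be reused until the
		 * resource recovery qualifier expires.
		 */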
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock(&phba->scsi_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

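	/*
	 * If the exchange is still busy on the wire, the XRI cannot be
	 * recycled yet; park the buffer on the aborted-xri list until the
	 * port reports the exchange complete.
	 */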
	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
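		/*
		 * SLI-2 style: the BPL BDE spans the FCP_CMD and FCP_RSP
		 * entries plus num_bde data entries, hence the +2 below.
		 */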
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
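			/*
			 * Offset of the LBA to corrupt within this I/O,
			 * clamped to the number of DIF tuples carried by
			 * the first protection sg entry.
			 */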
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

1360 1361
	/* Should we change the Reference Tag */
	if (reftag) {
1362 1363 1364
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
1365 1366 1367 1368 1369 1370 1371 1372
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
1373 1374 1375 1376 1377

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
1378
					be32_to_cpu(src->ref_tag));
1379

1380
					/*
1381 1382
					 * Save the old ref_tag so we can
					 * restore it on completion.
1383
					 */
1384 1385 1386 1387 1388 1389 1390 1391 1392
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1393
					phba->lpfc_injerr_wref_cnt--;
1394 1395 1396 1397 1398 1399 1400
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
1401 1402
					rc = BG_ERR_TGT | BG_ERR_CHECK;

1403 1404 1405
					break;
				}
				/* Drop thru */
1406
			case SCSI_PROT_WRITE_INSERT:
1407
				/*
1408 1409 1410
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
1411
				 */
1412
				/* DEADBEEF will be the reftag on the wire */
1413 1414
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
1415 1416 1417 1418 1419 1420 1421
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
					LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1422
				rc = BG_ERR_TGT | BG_ERR_CHECK;
1423 1424

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1425
					"9078 BLKGRD: Injecting reftag error: "
1426 1427
					"write lba x%lx\n", (unsigned long)lba);
				break;
1428
			case SCSI_PROT_WRITE_STRIP:
1429
				/*
1430 1431 1432
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
1433
				 */
1434 1435
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
1436 1437 1438 1439 1440 1441 1442
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1443
				rc = BG_ERR_INIT;
1444 1445

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1446
					"9077 BLKGRD: Injecting reftag error: "
1447
					"write lba x%lx\n", (unsigned long)lba);
1448
				break;
1449
			}
1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
1461 1462
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
1463 1464 1465 1466 1467 1468 1469
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1470
				rc = BG_ERR_INIT;
1471 1472

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1473
					"9079 BLKGRD: Injecting reftag error: "
1474
					"read lba x%lx\n", (unsigned long)lba);
1475
				break;
1476 1477 1478 1479 1480 1481
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
1482 1483 1484
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
1485
				if (src) {
1486 1487 1488 1489 1490 1491 1492 1493
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

1494 1495 1496 1497
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
1498
					be16_to_cpu(src->app_tag));
1499 1500

					/*
1501 1502
					 * Save the old app_tag so we can
					 * restore it on completion.
1503
					 */
1504 1505 1506 1507 1508 1509 1510 1511 1512
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
1513
					phba->lpfc_injerr_wapp_cnt--;
1514 1515 1516 1517 1518 1519 1520
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
1521
					rc = BG_ERR_TGT | BG_ERR_CHECK;
1522 1523 1524
					break;
				}
				/* Drop thru */
1525
			case SCSI_PROT_WRITE_INSERT:
1526
				/*
1527 1528 1529
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
1530
				 */
1531
				/* DEAD will be the apptag on the wire */
1532 1533
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
1534 1535 1536 1537 1538 1539 1540
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1541
				rc = BG_ERR_TGT | BG_ERR_CHECK;
1542

1543
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1544
					"0813 BLKGRD: Injecting apptag error: "
1545 1546
					"write lba x%lx\n", (unsigned long)lba);
				break;
1547
			case SCSI_PROT_WRITE_STRIP:
1548
				/*
1549 1550 1551
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
1552
				 */
1553 1554
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
1555 1556 1557 1558 1559 1560 1561
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1562
				rc = BG_ERR_INIT;
1563 1564

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1565
					"0812 BLKGRD: Injecting apptag error: "
1566
					"write lba x%lx\n", (unsigned long)lba);
1567
				break;
1568
			}
1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
1580 1581
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
1582 1583 1584 1585 1586 1587 1588
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1589
				rc = BG_ERR_INIT;
1590 1591

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1592
					"0814 BLKGRD: Injecting apptag error: "
1593
					"read lba x%lx\n", (unsigned long)lba);
1594
				break;
1595 1596 1597 1598
			}
		}
	}

1599

1600
	/* Should we change the Guard Tag */
1601 1602 1603 1604
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}
	return rc;
}
#endif
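
/*
 * Reader's note on the rc flags used by the injection code above (a
 * summary inferred from this file, not an authoritative definition):
 * BG_ERR_INIT marks corruption expected to be caught on the initiator
 * side, BG_ERR_TGT corruption that should travel the wire and be caught
 * by the target, BG_ERR_SWAP tells the caller to rewrite the BlockGuard
 * opcodes via lpfc_bg_err_opcodes(), and BG_ERR_CHECK tells it to turn
 * off checking so the corrupted tag actually reaches the wire.
 */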

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
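
/*
 * Reader's note on the BG_OP_IN_<x>_OUT_<y> naming (an interpretation
 * of the mappings above, not a spec quote): <x> is the protection
 * format the port expects on its input side and <y> the format it
 * emits on its output side, per direction.  E.g. for READ_INSERT the
 * rx opcode BG_OP_IN_NODIF_OUT_CSUM means unprotected data arrives
 * from the wire and an IP-checksum guard is generated toward the host.
 */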

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for the PDEs */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
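
/*
 * Reader's example (illustrative): an unprotected-buffer command with
 * two mapped data segments yields PDE5 + PDE6 + two data BDEs, so the
 * function above returns num_bde == 4.
 */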

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
2553
				dma_len = remainder;
2554 2555 2556 2557 2558 2559
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
2560 2561
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
2562 2563
			}

2564
			subtotal += dma_len;
2565

2566 2567 2568 2569 2570
			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2571

2572 2573 2574 2575
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
2576 2577 2578 2579 2580 2581 2582 2583 2584
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

2585 2586 2587
		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
2588
			sgl++;
2589 2590 2591
			continue;
		}

2592 2593
		/* are we done ? */
		if (curr_prot == protcnt) {
2594
			bf_set(lpfc_sli4_sge_last, sgl, 1);
2595 2596 2597 2598
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
2599
			sgl++;
2600 2601 2602 2603 2604

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_sge;
}

/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}
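
/*
 * Reader's summary of the mapping above: READ_STRIP/WRITE_INSERT leave
 * the host side without protection buffers (the HBA adds or removes DIF
 * on the wire), hence LPFC_PG_TYPE_NO_DIF; the PASS operations and
 * READ_INSERT/WRITE_STRIP carry DIF in separate host buffers, hence
 * LPFC_PG_TYPE_DIF_BUF.
 */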

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
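	/*
	 * Illustrative example (not in the original source): a 32KB
	 * transfer with 512-byte blocks spans 64 blocks, so the line
	 * above adds 64 * 8 = 512 bytes and fcpdl becomes 33280.
	 */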

	return fcpdl;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;
		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using the CRC algorithm
 * provided by crc_t10dif.
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
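	/* crc_t10dif() returns a CPU-endian value; store it in
	 * big-endian (wire) form so callers can compare the result
	 * directly against the DIF tuple's guard tag.
	 */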
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using the CSUM algorithm
 * provided by ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag, guard_type;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;
		guard_type = scsi_host_get_guard(cmd->device->host);

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
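				/* (all-ones tags act as the T10 "don't
				 * check this block" escape)
				 */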
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}


/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}
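
		/*
		 * Illustrative example: with 512-byte sectors, a PASS-mode
		 * bghm of 5200 bytes maps to 5200 / (512 + 8) = 10 blocks
		 * past the starting LBA (example figures, not from the
		 * original source).
		 */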

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3295
			sgl->word2 = le32_to_cpu(sgl->word2);
3296 3297 3298 3299 3300
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3301
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3302
			sgl->word2 = cpu_to_le32(sgl->word2);
3303
			sgl->sge_len = cpu_to_le32(dma_len);
3304 3305 3306
			dma_offset += dma_len;
			sgl++;
		}
3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3339 3340 3341 3342 3343

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
3344
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3345
		scsi_cmnd->device->hostdata)->oas_enabled)
3346
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3347 3348 3349
	return 0;
}
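
/*
 * Illustrative sketch (not built with the driver): how the loop above marks
 * only the final data SGE.  The helper name and standalone loop are
 * assumptions made for this example; bf_set, lpfc_sli4_sge_last and
 * struct sli4_sge are the real driver symbols used above.
 */
#if 0
static void example_mark_last_sge(struct sli4_sge *sgl, int nseg)
{
	int i;

	for (i = 0; i < nseg; i++) {
		/* word2 is kept little-endian in memory, so swap around bf_set */
		sgl[i].word2 = le32_to_cpu(sgl[i].word2);
		bf_set(lpfc_sli4_sge_last, &sgl[i], (i + 1) == nseg ? 1 : 0);
		sgl[i].word2 = cpu_to_le32(sgl[i].word2);
	}
}
#endif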

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
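
/*
 * Worked example (illustrative only) of the SGE budget check above: two SGEs
 * are always reserved for the FCP_CMND and FCP_RSP entries, and each
 * protection data segment may consume up to three SGEs, hence the
 * "prot_seg_cnt * 3" bound.  Assuming cfg_total_seg_cnt = 64: 20 protection
 * segments need at most 60 SGEs, which fits in 64 - 2 = 62, while 21 would
 * need 63 and is rejected.  The numbers are assumptions for the sketch.
 */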

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @psb for device with SLI-3 interface spec.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
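
/*
 * Worked example (illustrative only) of the read-check logic above: assume a
 * read with fcpDl = 0x1000 bytes where the target reports no residual
 * (RESID_UNDER clear) but the adapter's fcpi_parm is non-zero, i.e. the
 * adapter saw fewer data bytes than the target claims to have sent.  The two
 * counts disagree, so a frame was dropped in the fabric and the command is
 * failed with DID_ERROR rather than trusting the partially-filled buffer.
 * The numbers are assumptions for the sketch.
 */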

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;
	uint32_t logit = LOG_FCP;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_lock_irqsave(&phba->hbalock, flags);
		lpfc_cmd->pCmd = NULL;
		spin_unlock_irqrestore(&phba->hbalock, flags);

		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
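
/*
 * Illustrative sketch (not built with the driver): the effect of the
 * word-by-word swap above on a little-endian host.  The two-word buffer and
 * the function name are assumptions made for this example.
 */
#if 0
static void example_fcpcmd_swap(void)
{
	uint32_t src[2] = { 0x11223344, 0x55667788 };
	uint32_t dst[2];
	int j;

	for (j = 0; j < 2; j++)
		dst[j] = cpu_to_be32(src[j]);
	/*
	 * dst[0] now holds the bytes 11 22 33 44 in memory order, i.e. the
	 * big-endian wire image expected by the hardware.
	 */
}
#endif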

/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
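
/*
 * Illustrative note (not driver code): once the jump table is filled in
 * above, callers dispatch through the function pointers without caring
 * whether the HBA is SLI-3 or SLI-4; e.g. the lpfc_scsi_prep_dma_buf()
 * wrapper earlier in this file is simply
 *
 *	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *
 * so supporting a new PCI device group only requires a new case in the
 * switch above plus the matching _sX routines.
 */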

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset.  It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len, link_speed = 0;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			link_speed = lpfc_sli_port_speed_get(phba);
		} else {
			if (phba->sli4_hba.link_state.logical_speed)
				link_speed =
				      phba->sli4_hba.link_state.logical_speed;
			else
				link_speed = phba->sli4_hba.link_state.speed;
		}
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to the Scsi_Host to which the command was dispatched.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from the scsi command and provides it to the
 * firmware.  The midlayer's done callback is invoked once the driver finishes
 * processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x "
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
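
/*
 * Illustrative note (not driver code): the three exit labels above map the
 * driver's internal outcomes onto the midlayer's queuecommand contract --
 * SCSI_MLQUEUE_HOST_BUSY retries against the whole host (no buffers left),
 * SCSI_MLQUEUE_TARGET_BUSY retries against one target (node transitioning
 * or its queue full), and returning 0 after cmnd->scsi_done() reports a
 * final status for the command.
 */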

/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ring_number, ret_val;
	unsigned long flags, iflags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
		pring_s4 = &phba->sli.ring[ring_number];
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}
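
/*
 * Illustrative note (not driver code): lpfc_taskmgmt_name() exists purely
 * for log decoration; the "0702 Issue %s to TGT ..." message in
 * lpfc_send_taskmgmt() below passes lpfc_taskmgmt_name(task_mgmt_cmd) for
 * the %s, so e.g. FCP_LUN_RESET prints as "FCP_LUN_RESET" rather than a
 * raw opcode value.
 */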


/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task mgmt command
 * succeeded.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;


	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;


		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

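		/* Only trust rsp_info_code when the FCP_RSP carries a valid
		 * response-info field with the expected 8-byte length.
		 */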
		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						"reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						"failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						"LUN\n");
				break;
			}
		}
	}
	return ret;
}

/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned  tgt_id, uint64_t lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

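	/* The TMF is issued synchronously; lpfc_sli_issue_iocb_wait()
	 * returns once the response iocb completes or the command times
	 * out (lpfc_cmd->timeout).
	 */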
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
			 "iocb_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4],
			 iocbq->iocb_flag);
		/* if ulpStatus != IOSTAT_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - Flush I/O contexts after a reset TMF
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					tgt_id, lun_id, context);
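	/* Poll every 20ms, bounded by twice the devloss timeout, until no
	 * matching commands remain outstanding.
	 */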
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
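		/* The target is gone: flush any outstanding i/o for it and
		 * return FAST_IO_FAIL rather than escalating the reset
		 * further.
		 */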
		spin_lock_irq(shost->host_lock);
		pnode->nlp_flag &= ~NLP_NPR_ADISC;
		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
		spin_unlock_irq(shost->host_lock);
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset on the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
 * rejects all outstanding SCSI commands to the host, with the errors returned
 * to the SCSI mid-level. As this is the SCSI mid-level's last resort for
 * error handling, it only returns error if resetting the adapter fails;
 * in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

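	/* Quiesce and restart the port: block mgmt i/o, take the HBA
	 * offline, restart the board, then bring it back online.
	 */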
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	rc = lpfc_online(phba);
	if (rc)
		ret = FAILED;
	lpfc_unblock_mgmt_io(phba);

	if (ret == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "3323 Failed host reset, bring it offline\n");
		lpfc_sli4_offline_eratt(phba);
	}
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates lun_queue_depth + 2 scsi_bufs into this host's
 * globally available list of scsi buffers.  It also makes sure that no more
 * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun, true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;
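	/* E.g. with a lun_queue_depth of 30, each new scsi device asks for
	 * 30 + 2 = 32 buffers, capped by cfg_hba_queue_depth below.
	 */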

	/* If allocated buffers are enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		  GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			bool atomic_create)
{
	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data  = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->available = false;
	return lun_info;
}

/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info  ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Point to list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}

/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - the lun was found
 *   false - the lun was not found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for the lun, or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
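
/*
 * Example (illustrative): to walk every OAS-enabled lun, start with
 * *starting_lun set to FIND_FIRST_OAS_LUN and zeroed wwpn filters, then
 * call this routine repeatedly until *starting_lun comes back as
 * NO_MORE_OAS_LUN.
 */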

/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine enables a lun for oas operations.  The routine does so
 * as follows:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and adds it to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create a lun info structure and add it to the list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   false);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 *
 * This routine disables a lun for oas operations.  The routine does so
 * as follows:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available to the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.change_queue_type	= scsi_change_queue_type,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};
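
/*
 * The vport template below mirrors lpfc_template but omits the host
 * reset handler and vendor id, since a virtual port cannot reset the
 * physical adapter.
 */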

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.change_queue_type	= scsi_change_queue_type,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};