/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
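/*
 * Note: the helper below currently ignores @flag and unconditionally
 * reports that guard/ref protection checking is enabled.
 */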
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
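		/* Advance past the FCP command sge; the response sge is second. */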
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
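		/*
		 * Worked example (illustrative values): with bucket_base = 0
		 * and bucket_step = 10, a 25 ms latency yields
		 * i = (25 + 10 - 1 - 0) / 10 = 3.
		 */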
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at
 * most one event per second, and wakes up the worker thread of @phba to
 * process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
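				/*
				 * Worked example (illustrative values): a
				 * queue_depth of 32 with num_rsrc_err = 8 and
				 * num_cmd_success = 24 gives a reduction of
				 * 32 * 8 / 32 = 8, i.e. a new depth of 24.
				 */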
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);
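
		/*
		 * Resulting layout of the single DMA allocation
		 * (illustrative):
		 *
		 *   psb->data: [ fcp_cmnd | fcp_rsp | BPL entries ]
		 */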

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_sblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
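
	/*
	 * Illustrative example: buffers with xris 100, 101, 102, 200 yield
	 * one non-embedded block post for 100-102 plus one embedded single
	 * post for 200, since the hole after 102 breaks the block.
	 */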

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* posted or not, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put block-posted SCSI buffers on the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers that need to be reposted onto a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/*
		 * 4K Page alignment is CRITICAL to BlockGuard, double check
		 * to be sure.
		 */
		if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
		    (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3369 Memory alignment error "
					"addr=%lx\n",
					(unsigned long)psb->data);
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));
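
		/*
		 * SLI-4 layout of the single DMA allocation (illustrative;
		 * note the SGL comes first, unlike SLI-3):
		 *
		 *   psb->data: [ sgl (sgl_size) | fcp_cmnd | fcp_rsp ]
		 */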

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* Setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
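		/*
		 * The get list is empty; refill it by splicing over the
		 * put list. The two-list scheme reduces lock contention
		 * between the allocation and release paths.
		 */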
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
1015
	unsigned long iflag = 0;
1016 1017
	int found = 0;

1018
	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1019 1020
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
1021
		if (lpfc_test_rrq_active(phba, ndlp,
1022
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
1023
			continue;
1024
		list_del_init(&lpfc_cmd->list);
1025
		found = 1;
1026
		break;
1027
	}
1028
	if (!found) {
1029
		spin_lock(&phba->scsi_buf_list_put_lock);
1030 1031 1032
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1033
		spin_unlock(&phba->scsi_buf_list_put_lock);
1034 1035
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
1036 1037 1038
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
1039
			list_del_init(&lpfc_cmd->list);
1040 1041 1042 1043
			found = 1;
			break;
		}
	}
1044
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1045 1046
	if (!found)
		return NULL;
1047
	return  lpfc_cmd;
1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for device with SLI-3 interface spec. This
 * routine scans through the sg elements and formats the bdes. This routine
 * also initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20
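
/*
 * Example: a reftag injection on a WRITE_PASS returns
 * (BG_ERR_TGT | BG_ERR_CHECK): the corrupted tag is sent on the wire
 * and local checking is disabled so the target detects the error.
 */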

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

1609

1610
	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

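/*
 * Usage sketch (illustrative, not compiled): callers treat the
 * lpfc_bg_err_inject() return value as a bitmask.  BG_ERR_INIT marks an
 * error injected on the initiator side, BG_ERR_TGT one the target should
 * detect, BG_ERR_SWAP tells the caller to swap the CRC/CSUM opcodes and
 * BG_ERR_CHECK tells it to disable HBA-side checking:
 *
 *	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 *	if (rc & BG_ERR_SWAP)
 *		lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
 *	if (rc & BG_ERR_CHECK)
 *		checking = 0;
 */
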
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

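/*
 * Example (sketch): for a SCSI_PROT_WRITE_PASS command whose guard type
 * is an IP checksum (lpfc_cmd_guard_csum() is true), the mapping above
 * yields:
 *
 *	uint8_t txop, rxop;
 *
 *	lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
 *	// txop == BG_OP_IN_CSUM_OUT_CRC: host supplies CSUM, wire carries CRC
 *	// rxop == BG_OP_IN_CRC_OUT_CSUM: wire carries CRC, host receives CSUM
 */
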
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIFs from
 * the incoming data stream).
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

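/*
 * Worked example (sketch): a write mapped into three data segments with
 * no separate protection buffers produces a single protection group,
 *
 *	PDE5 + PDE6 + 3 data BDEs  =>  num_bde == 5,
 *
 * which is why the caller treats anything below 2 returned entries as
 * an error.
 */
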
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}

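/*
 * Worked example (sketch): with a 512-byte block size, a protection
 * segment of 32 bytes holds 32 / 8 = 4 DIF tuples, so one protection
 * group covers 4 * 512 = 2048 data bytes:
 *
 *	PDE5 + PDE6 + PDE7 + data BDEs totalling 2048 bytes,
 *
 * after which reftag is advanced by protgrp_blks (4) before the next
 * group is built.
 */
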
/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIFs from
 * the incoming data stream).
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}

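/*
 * Example (sketch): the SLI-4 analogue of the BPL case above; for
 * datasegcnt == 3 the routine emits
 *
 *	DISEED + 3 data SGEs  =>  num_sge == 4,
 *
 * with lpfc_sli4_sge_last set only on the final data SGE.
 */
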
/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}

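/*
 * Worked example (sketch): a DIF SGE may not cross a 4K boundary.  If
 * the protection buffer starts at offset 0xf80 within a 4K page and
 * protgroup_len == 0x100, then protgroup_remainder == 0x1000 - 0xf80 ==
 * 0x80, so this group carries only 0x80 / 8 = 16 tuples (16 * blksize
 * data bytes) and the next loop iteration resumes the same protection
 * segment at protgroup_offset == 0x80.
 */
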
/**
 * lpfc_prot_group_type - Get the protection group type of a SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;

	return fcpdl;
}

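/*
 * Worked example (sketch): a 4096-byte WRITE_PASS request with a
 * 512-byte block size carries one 8-byte DIF tuple per block, so
 *
 *	fcpdl = 4096 + (4096 / 512) * 8 = 4160
 *
 * bytes travel on the wire, while READ_INSERT and WRITE_STRIP leave
 * fcpdl at scsi_bufflen() because no protection data is transferred.
 */
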
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * via crc_t10dif.
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * via ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}

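/*
 * Example (sketch): both helpers return the 16-bit guard value expected
 * in the DIF tuple for one block of data, selected by guard type:
 *
 *	sum = lpfc_cmd_guard_csum(cmd) ? lpfc_bg_csum(data, blksize)
 *				       : lpfc_bg_crc(data, blksize);
 *
 * which is how lpfc_calc_bg_err() below recomputes the guard before
 * comparing it against the tuple's guard_tag.
 */
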
/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}


/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}

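/*
 * Worked example (sketch): bghm counts bytes "on the wire" up to the
 * failure.  For a READ_PASS on a 512-byte-sector device each block
 * occupies 512 + 8 bytes on the wire, so bghm == 2080 converts to
 * 2080 / 520 = 4 blocks, and the failing sector placed in the sense
 * data is scsi_get_lba(cmd) + 4.
 */
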
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
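			/*
			 * SGE words sit in little-endian memory: swap word2 to
			 * host order, set the bit fields, then swap it back
			 * before the SGL is handed to the hardware.
			 */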
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
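		/*
		 * Duplicating the first data SGE in the IOCB/WQE lets the HBA
		 * begin the first data transfer without a separate SGL fetch.
		 */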
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}
	return 0;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
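			/* (Roughly one DISEED plus data and DIF SGEs per
			 * protection group, hence the factor of three.)
			 */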
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA"
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter-gather list of the scsi
 * command held in @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error xri x%x  Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 ((phba->sli_rev == LPFC_SLI_REV4) ?
				 lpfc_cmd->cur_iocbq.sli4_xritag :
				 rsp_iocb->iocb.ulpContext),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic.  Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 *
 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
 * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 * If scsi-mq is enabled, get the default block layer mapping of software queues
 * to hardware queues. This information is saved in request tag.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
				  struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct lpfc_vector_map_info *cpup;
	int chann, cpu;
	uint32_t tag;
	uint16_t hwq;

	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
		tag = blk_mq_unique_tag(cmnd->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);

		return hwq;
	}

	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
	    && phba->cfg_fcp_io_channel > 1) {
		cpu = smp_processor_id();
		if (cpu < phba->sli4_hba.num_present_cpu) {
			cpup = phba->sli4_hba.cpu_map;
			cpup += cpu;
			return cpup->channel_id;
		}
	}
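	/* Otherwise fall back to simple round-robin across the FCP WQs. */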
	chann = atomic_add_return(1, &phba->fcp_qidx);
	chann = chann % phba->cfg_fcp_io_channel;
	return chann;
}


/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t logit = LOG_FCP;

	atomic_inc(&phba->fc4ScsiIoCmpls);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd)
		return;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
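	/* A busy exchange defers reuse of this buffer until the HBA has
	 * fully released the XRI.
	 */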

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			atomic_dec(&pnode->cmd_pending);
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		atomic_dec(&pnode->cmd_pending);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd - Wrapper function to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

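	/* Copy the CDB and zero-pad the fixed-size FCP CDB field. */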
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
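			/*
			 * If the target negotiated first burst, seed XFER_RDY
			 * with up to cfg_first_burst_size bytes of write data.
			 */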
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			atomic_inc(&phba->fc4ScsiOutputRequests);
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			atomic_inc(&phba->fc4ScsiInputRequests);
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		atomic_inc(&phba->fc4ScsiControlRequests);
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
		break;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len, link_speed = 0;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf,0,384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		link_speed = lpfc_sli_port_speed_get(phba);
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @t: Pointer to the timer_list embedded in the lpfc_hba structure.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The @done callback is invoked after driver finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
				 " Pending command:%d"
				 " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
				 " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
				 ndlp->nlp_sid, ndlp->cmd_qdepth,
				 atomic_read(&ndlp->cmd_pending),
				 ndlp->nlp_nodename.u.wwn[0],
				 ndlp->nlp_nodename.u.wwn[1],
				 ndlp->nlp_nodename.u.wwn[2],
				 ndlp->nlp_nodename.u.wwn[3],
				 ndlp->nlp_nodename.u.wwn[4],
				 ndlp->nlp_nodename.u.wwn[5],
				 ndlp->nlp_nodename.u.wwn[6],
				 ndlp->nlp_nodename.u.wwn[7],
				 ndlp->nlp_portname.u.wwn[0],
				 ndlp->nlp_portname.u.wwn[1],
				 ndlp->nlp_portname.u.wwn[2],
				 ndlp->nlp_portname.u.wwn[3],
				 ndlp->nlp_portname.u.wwn[4],
				 ndlp->nlp_portname.u.wwn[5],
				 ndlp->nlp_portname.u.wwn[6],
				 ndlp->nlp_portname.u.wwn[7]);
		goto out_tgt_busy;
	}
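	/* Count this I/O against the node; dropped again on completion or
	 * on any failure path below.
	 */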
	atomic_inc(&ndlp->cmd_pending);

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			atomic_dec(&phba->fc4ScsiOutputRequests);
			break;
		case READ_DATA:
			atomic_dec(&phba->fc4ScsiInputRequests);
			break;
		default:
			atomic_dec(&phba->fc4ScsiControlRequests);
		}
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	atomic_dec(&ndlp->cmd_pending);
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}


/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
4810 4811 4812 4813
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

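	/*
	 * With the link up send a real ABTS (ABORT_XRI); with the link down
	 * just close the exchange locally (CLOSE_XRI).
	 */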
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
		if (pring_s4 == NULL) {
			ret = FAILED;
			goto out_unlock;
		}
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock(&pring_s4->ring_lock);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock(&pring_s4->ring_lock);
4842 4843 4844 4845
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
4846
	/* no longer need the lock after this point */
4847
	spin_unlock_irqrestore(&phba->hbalock, flags);
4848

4849 4850

	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}

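/**
 * lpfc_taskmgmt_name - Decode an FCP task management command code to a name
 * @task_mgmt_cmd: FCP task management command code.
 *
 * Return: printable name of the TMF, or "unknown" for unrecognized codes.
 **/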
static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}

/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;


	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;


		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						"reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						"failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						"LUN\n");
				break;
			}
		}
	}
	return ret;
}


/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - Check whether the scsi target is present and mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - Flush and wait for outstanding I/O contexts
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.sli3_ring[LPFC_FCP_RING],
					tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0798 Device Reset rport failure: rdata x%p\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		if (pnode) {
			spin_lock_irq(shost->host_lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does host reset to the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline calls lpfc_sli_hba_down which will abort and locally
 * reject all outstanding SCSI commands to the host, with the errors returned
 * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
 * for error handling, it will only return an error if resetting the adapter
 * is not successful; in all other cases, it will return success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	rc = lpfc_online(phba);
	if (rc)
		ret = FAILED;
	lpfc_unblock_mgmt_io(phba);

	if (ret == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "3323 Failed host reset, bring it offline\n");
		lpfc_sli4_offline_eratt(phba);
	}
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffers are not allocated beyond the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority to assign to the lun
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		  GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available by the system,
 * and pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data  = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}

/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}

/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - lun found
 *   false - lun not found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for lun or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}

/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority to assign to the lun
 *
 * This routine enables a lun for oas operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and add to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create an lun info structure and add to list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, false);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority to assign to the lun
 *
 * This routine disables a lun for oas operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available by the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		lun_info->priority = pri;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

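/*
 * Stub entry points used by the NVMe-only host template below.  The SCSI
 * midlayer still requires these callbacks to exist, so they simply refuse
 * SCSI I/O and device setup on ports configured for NVMe.
 */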
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}

static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}

struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler  = lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};

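/* Identical to lpfc_template below, but without an eh_host_reset_handler */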
struct scsi_host_template lpfc_template_no_hr = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};

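/* Fully featured SCSI host template, including host and bus reset handlers */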
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};

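/* Host template for NPIV vports: vport attributes, no bus or host reset */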
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};