/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};
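
/*
 * For reference, this mirrors the standard 8-byte T10 DIF tuple carried
 * with every logical block: a CRC16 guard computed over the data, a
 * 2-byte application tag, and a 4-byte reference tag (typically the low
 * 32 bits of the LBA for Type 1 protection).
 */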

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
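
/*
 * Worked example of the bucket selection above (hypothetical values):
 * with bucket_base = 0, bucket_step = 50 and a 120 ms completion, the
 * linear scheme picks i = (120 + 50 - 1 - 0) / 50 = 3, while the
 * power-of-2 scheme picks the first i with 120 <= 0 + (1 << i) * 50,
 * i.e. i = 2.
 */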

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport  *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
 * @sdev: Pointer to the scsi device whose queue type is to change
 * @tag_type: Identifier for queue tag type
 */
static int
lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
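
/*
 * Note that two throttles apply above: ramp-down events are posted at
 * most once per QUEUE_RAMP_DOWN_INTERVAL, and WORKER_RAMP_DOWN_QUEUE
 * is only set (and the worker only woken) when it is not already
 * pending.
 */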

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport. This routine
 * posts at most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time.  This routine wakes up the worker thread of the
 * HBA to process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for every scsi device
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
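
/*
 * Worked example of the reduction above (hypothetical counters):
 * with num_rsrc_err = 10, num_cmd_success = 30 and queue_depth = 32,
 * the scaled error share is 32 * 10 / (10 + 30) = 8, so the depth
 * drops to 32 - 8 = 24.  If the share rounds down to zero, the depth
 * is simply decremented by one.
 */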

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. This routine increases the queue depth for every scsi
 * device on each vport associated with @phba by 1. This routine also sets
 * @phba num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
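
/*
 * For reference, each SLI-3 buffer carved out above is laid out in one
 * DMA allocation as [fcp_cmnd | fcp_rsp | BPL], with bpl[0]/bpl[1]
 * pre-pointed at the fcp_cmnd/fcp_rsp regions and the remaining BPL
 * entries left for lpfc_scsi_prep_dma_buf_s3() to fill per I/O.
 */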

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of scsi buffers on the list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with non-contiguous xri, if any, it shall use
 * the embedded SGL post mailbox command for posting. The @post_sblist passed
 * in must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put SCSI buffers with posted sgls on the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
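
/*
 * Example of the posting rule above (hypothetical XRIs): buffers with
 * XRIs 100, 101, 102 and 105 produce one non-embedded block post for
 * the contiguous run {100, 101, 102}, while 105, left as a single sgl
 * with a non-contiguous xri, is posted individually through
 * lpfc_sli4_post_sgl().
 */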

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
	spin_unlock_irq(&phba->scsi_buf_list_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Page alignment is CRITICAL, double check to be sure */
		if (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
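
/*
 * Note the layout difference from SLI-3: here the SGL leads the DMA
 * buffer, i.e. [sgl (sgl_size) | fcp_cmnd | fcp_rsp], with the first
 * two SGEs pre-pointed at fcp_cmnd and fcp_rsp as set up above.
 */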

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
		break;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	if (!found)
		return NULL;
	else
		return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the BDEs. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
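
/*
 * Example of the BDE placement rule above (hypothetical I/O): on an
 * SLI-3 port without BlockGuard, a 2-segment command fits its data
 * BDEs in the extended IOCB (ebde_count = 3, counting the response
 * BDE), while a 5-segment command exceeds LPFC_EXT_DATA_BDE_COUNT and
 * gets a BPL as its first data BDE (ebde_count = 2).
 */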

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
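
/*
 * These bits combine: the low bits say where the injected error should
 * be detected (initiator vs. target) and the high bits what adjustment
 * the protection setup needs; e.g. the write reftag cases below return
 * BG_ERR_TGT | BG_ERR_CHECK.
 */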

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = sc->device->hostdata;
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

1475 1476
	/* Should we change the Reference Tag */
	if (reftag) {
1477 1478 1479
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
1480 1481 1482 1483 1484 1485 1486 1487
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
1488 1489 1490 1491 1492

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
1493
					be32_to_cpu(src->ref_tag));
1494

1495
					/*
1496 1497
					 * Save the old ref_tag so we can
					 * restore it on completion.
1498
					 */
1499 1500 1501 1502 1503 1504 1505 1506 1507
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1508
					phba->lpfc_injerr_wref_cnt--;
1509 1510 1511 1512 1513 1514 1515
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
1516 1517
					rc = BG_ERR_TGT | BG_ERR_CHECK;

1518 1519 1520
					break;
				}
				/* Drop thru */
1521
			case SCSI_PROT_WRITE_INSERT:
1522
				/*
1523 1524 1525
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
1526
				 */
1527
				/* DEADBEEF will be the reftag on the wire */
1528 1529
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
1530 1531 1532 1533 1534 1535 1536
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
					LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1537
				rc = BG_ERR_TGT | BG_ERR_CHECK;
1538 1539

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1540
					"9078 BLKGRD: Injecting reftag error: "
1541 1542
					"write lba x%lx\n", (unsigned long)lba);
				break;
1543
			case SCSI_PROT_WRITE_STRIP:
1544
				/*
1545 1546 1547
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
1548
				 */
1549 1550
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
1551 1552 1553 1554 1555 1556 1557
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1558
				rc = BG_ERR_INIT;
1559 1560

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1561
					"9077 BLKGRD: Injecting reftag error: "
1562
					"write lba x%lx\n", (unsigned long)lba);
1563
				break;
1564
			}
1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
1576 1577
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
1578 1579 1580 1581 1582 1583 1584
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
1585
				rc = BG_ERR_INIT;
1586 1587

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1588
					"9079 BLKGRD: Injecting reftag error: "
1589
					"read lba x%lx\n", (unsigned long)lba);
1590
				break;
1591 1592 1593 1594 1595 1596
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	return rc;
}
#endif
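
/*
 * Illustrative note (not part of the driver flow): when guard injection is
 * armed (lpfc_injerr_wgrd_cnt != 0), a WRITE_PASS first sets BG_ERR_CHECK,
 * then falls through and ORs in BG_ERR_TGT | BG_ERR_SWAP.  The caller
 * reacts by swapping CRC<->CSUM opcodes via lpfc_bg_err_opcodes() and by
 * clearing its local "checking" flag, so the corrupted tag travels to the
 * target instead of being rejected by the local HBA.
 */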

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
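
/*
 * Example (illustrative): for a WRITE_PASS command on a host configured
 * with SHOST_DIX_GUARD_IP, the block layer supplies IP checksums, so the
 * function above selects rxop = BG_OP_IN_CRC_OUT_CSUM and
 * txop = BG_OP_IN_CSUM_OUT_CRC; the HBA converts the host-side checksum
 * into the T10 CRC that is actually carried on the wire.
 */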

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, checking);
		bf_set(pde6_re, pde6, checking);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
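
/*
 * Sizing note (illustrative): lpfc_bg_setup_bpl() always emits one
 * PDE5/PDE6 pair ahead of the data, so a command mapped to 4 data
 * segments yields num_bde = 2 + 4 = 6.  This is why callers reserve two
 * extra entries when comparing against phba->cfg_total_seg_cnt.
 */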

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, checking);
		bf_set(pde6_re, pde6, checking);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
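
/*
 * Worked example (illustrative, assuming 512-byte blocks): a protection
 * s/g entry of 64 bytes carries 64 / 8 = 8 DIF tuples, so protgrp_blks = 8
 * and protgrp_bytes = 8 * 512 = 4096.  Data BDEs are then emitted until
 * exactly 4096 data bytes are covered, splitting a data s/g entry across
 * protection groups whenever it straddles that boundary.
 */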

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	int datadir = sc->sc_data_direction;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
	}
	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
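
/*
 * Sizing note (illustrative): unlike the SLI-3 BPL path, which needs a
 * PDE5/PDE6 pair, this SLI-4 path spends a single DISEED SGE ahead of the
 * data, so 4 mapped data segments yield num_sge = 1 + 4 = 5.
 */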

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
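
/*
 * Boundary note (illustrative): a DIF SGE must not cross a 4K page of the
 * protection buffer.  If a protection s/g entry starts at page offset
 * 0xf80 with 0x100 bytes of tuples, only 0x1000 - 0xf80 = 0x80 bytes
 * (16 tuples) are consumed on this pass; protgroup_offset then carries the
 * remaining 0x80 bytes into a fresh DISEED/DIF pair on the next loop.
 */
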
/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}
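
/*
 * Example (illustrative): READ_STRIP means the DIF tuples exist only on
 * the wire and are dropped by the HBA, so no protection buffer accompanies
 * the command and the type is LPFC_PG_TYPE_NO_DIF; READ_PASS hands the
 * tuples through to the host and therefore maps to LPFC_PG_TYPE_DIF_BUF.
 */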

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it.  Must adjust FCP data length
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
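
/*
 * Worked example (illustrative): a DIF Type 1 write of eight 512-byte
 * blocks has scsi_bufflen() = 4096, but each block gains an 8-byte DIF
 * trailer on the wire, so fcpdl = 4096 + (4096 / 512) * 8 = 4160 and
 * word 4 of the IOCB must carry 4160 rather than 4096.
 */
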
/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * using crc_t10dif.
 */
uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * using ip_compute_csum.
 */
uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}
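
/*
 * Note: lpfc_calc_bg_err() compares these results against the raw
 * (wire-order) guard_tag in each tuple, so lpfc_bg_crc() swaps its CRC
 * with cpu_to_be16() while the one's-complement IP checksum is already
 * byte-order neutral and needs no swap.
 */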

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag, guard_type;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;
		guard_type = scsi_host_get_guard(cmd->device->host);

		start_ref_tag = scsi_get_lba(cmd);
		/* src must be set before the first app_tag dereference */
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (guard_type == SHOST_DIX_GUARD_IP)
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
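
/*
 * Illustrative layout: each 8-byte scsi_dif_tuple covers one logical block
 * (2-byte guard tag, 2-byte app tag, 4-byte ref tag), so the walk above
 * steps through sgpe->length / 8 tuples per protection segment while the
 * expected ref tag increments from the starting LBA.
 */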


/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
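
/*
 * Worked example (illustrative): for a READ_PASS on 512-byte sectors the
 * wire interleaves data and DIF, so the byte-based high water mark (bghm)
 * is divided by 512 + 8 = 520 to get a block count; the failing LBA placed
 * in the sense data is then scsi_get_lba(cmd) + bghm.
 */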

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int diflen, fcpdl;
	unsigned blksize;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write */
		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/* If protection data on the wire, adjust the count accordingly */
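	/*
	 * Worked example (illustrative values): a 32KB transfer with
	 * 512-byte logical blocks spans 64 blocks; each block carries an
	 * 8-byte DIF tuple on the wire, so diflen = 64 * 8 = 512 and the
	 * adjusted fcpdl becomes 32768 + 512 = 33280.
	 */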
	blksize = lpfc_cmd_blksize(sc);
	diflen = (fcpdl / blksize) * 8;
	fcpdl += diflen;
	return fcpdl;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
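			/*
			 * Illustrative check (values assumed): with a
			 * cfg_total_seg_cnt of 64 and the first 2 SGEs
			 * reserved for fcp_cmnd/fcp_rsp, at most
			 * (64 - 2) / 3 = 20 protection data segments fit.
			 */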
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

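	/*
	 * Record the DIF operation class on the iocb so the SLI layer can
	 * set up the corresponding protection fields when it builds the
	 * work queue entry.
	 */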
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);

	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

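	/* Three classes of events are posted from here: queue-full/device-busy
	 * SCSI events, check-condition events for READ_10/WRITE_10 commands,
	 * and fabric read-check errors detected via fcpi_parm mismatches.
	 */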
	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list of the scsi
 * command field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
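		/*
		 * A valid FCP_RSP_INFO field is either 0 (absent), 4 or 8
		 * bytes long per the FCP spec; any other length is treated
		 * as a protocol error below.
		 */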
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check whether the under run
		 * reported by the storage array matches the under run
		 * reported by the HBA. If they differ, a frame was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Data Transfer Check Error: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	struct scsi_device *tmp_sdev;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;
	uint32_t logit = LOG_FCP;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%d> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID, pnode->nlp_DID,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
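			/*
			 * Ramp the queue depth back up by a percentage of
			 * its current value, by at least one (illustrative:
			 * with a 5% ramp-up and cmd_qdepth of 64, depth = 3),
			 * clamped at the configured target queue depth.
			 */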
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_lock_irqsave(&phba->hbalock, flags);
		lpfc_cmd->pCmd = NULL;
		spin_unlock_irqrestore(&phba->hbalock, flags);

		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, queue_depth);

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		shost_for_each_device(tmp_sdev, shost) {
			if (tmp_sdev->id != scsi_id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth-1);
			if (depth <= 0)
				continue;
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
							       pnode,
							       tmp_sdev->lun,
							       depth+1, depth);
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
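	/*
	 * Swap one 32-bit word per iteration; this relies on
	 * sizeof(struct fcp_cmnd) being a multiple of sizeof(uint32_t).
	 */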
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];
	uint8_t *ptr;
	bool sli4;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

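	/* Copy the CDB and zero-pad it out to the fixed FCP CDB length. */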
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			if (sli4)
				iocb_cmd->ulpPU = PARM_READ_CHECK;
			else {
				iocb_cmd->un.fcpi.fcpi_parm = 0;
				iocb_cmd->ulpPU = 0;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is IOCB completion routine for device reset and target reset
 * routine. This routine releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len, link_speed = 0;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf,0,384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			link_speed = lpfc_sli_port_speed_get(phba);
		} else {
			if (phba->sli4_hba.link_state.logical_speed)
				link_speed =
				      phba->sli4_hba.link_state.logical_speed;
			else
				link_speed = phba->sli4_hba.link_state.speed;
		}
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to Scsi_Host data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from the scsi command and provides it to the
 * firmware; cmnd->scsi_done is invoked once the driver completes the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}


/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %d\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/*
	 * The scsi command cannot be in the txq and must be in flight because
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
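	/* Allow up to twice the configured devloss timeout for the aborted
	 * command to complete before reporting the abort as failed.
	 */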
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
	lpfc_cmd->waitq = NULL;

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %d\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d\n", ret, cmnd->device->id,
			 cmnd->device->lun);
4854
	return ret;
4855 4856
}
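
/*
 * Illustrative sketch (not part of the driver; the helper name is an
 * assumption): the abort handler above parks on lpfc_cmd->waitq and relies
 * on the FCP completion path to clear pCmd and wake it. Condensed, the
 * completing side does roughly the following; locking is elided for brevity.
 */
static inline void
lpfc_example_abort_wakeup(struct lpfc_scsi_buf *lpfc_cmd)
{
	wait_queue_head_t *waitq = lpfc_cmd->waitq;	/* set by the aborter */

	/* The LLD is done with this command ... */
	lpfc_cmd->pCmd = NULL;
	/* ... which satisfies (lpfc_cmd->pCmd != cmnd) in the handler */
	if (waitq)
		wake_up(waitq);
}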

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}
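
/*
 * Illustrative use (a sketch of the pattern the callers below follow):
 * the string from lpfc_taskmgmt_name() only feeds log messages, e.g.:
 *
 *	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 *			 "0702 Issue %s to TGT %d LUN %d\n",
 *			 lpfc_taskmgmt_name(FCP_LUN_RESET), tgt_id, lun_id);
 */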

/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned int tgt_id, unsigned int lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
			 "iocb_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4],
			 iocbq->iocb_flag);
	} else if (status == IOCB_BUSY)
		ret = FAILED;
	else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

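	/*
	 * On IOCB_TIMEDOUT the completion was redirected to
	 * lpfc_tskmgmt_def_cmpl above, which still owns lpfc_cmd and
	 * releases it when the iocb finally completes; free the buffer
	 * here only for the other outcomes.
	 */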
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - Check whether the scsi target (rport) is usable
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}
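
/*
 * The bounded wait above is a recurring idiom in this file: poll a
 * condition for at most 2 * devloss_tmo. An illustrative, generalized
 * sketch (the helper name and callback are assumptions, not driver API):
 */
static inline int
lpfc_example_poll_devloss(struct lpfc_vport *vport,
			  bool (*done)(void *arg), void *arg)
{
	unsigned long later;

	/* Deadline is twice the devloss timeout, as in the handlers above */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (done(arg))
			return SUCCESS;
		/* Re-check the condition every 500ms */
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	return FAILED;
}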

/**
 * lpfc_reset_flush_io_context - Flush and drain outstanding i/o for a context
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}
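
/*
 * Illustrative sketch (condensed from the reset handlers below; the
 * function name is an assumption, not driver API): the canonical TMF
 * sequence is "gate on target state, send the TMF, flush orphaned i/o".
 */
static inline int
lpfc_example_lun_reset_sequence(struct lpfc_vport *vport,
				struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	unsigned int tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;

	/* 1) Wait (bounded by devloss_tmo) for the rport to be usable */
	if (!rdata || lpfc_chk_tgt_mapped(vport, cmnd) == FAILED)
		return FAILED;

	/* 2) Send the task management function (callers also log status) */
	(void) lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				  FCP_LUN_RESET);

	/* 3) Flush regardless: the TMF may orphan i/o even when it fails */
	return lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					   LPFC_CTX_LUN);
}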

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status, ret = SUCCESS;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);
	return ret;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status, ret = SUCCESS;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return ret;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline calls lpfc_sli_hba_down, which aborts and locally
 * rejects all outstanding SCSI commands to the host; the resulting errors
 * are returned to the SCSI mid-level. As this is the SCSI mid-level's last
 * resort for error handling, it only returns an error if resetting the
 * adapter is unsuccessful; in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"3172 SCSI layer issued Host Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this
 * host's globally available list of scsi buffers. This routine also makes
 * sure scsi buffers are not allocated beyond the HBA limit conveyed to the
 * midlayer. This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;
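	/*
	 * Worked example (numbers are illustrative only): with a
	 * cfg_lun_queue_depth of 30, each scsi_device asks for 30 + 2 = 32
	 * buffers; ten devices keep allocating until 10 * 32 = 320 buffers
	 * exist, after which the check below short-circuits.
	 */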

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}


struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= lpfc_change_queue_type,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= lpfc_change_queue_type,
};
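
/*
 * Illustrative sketch (the helper is an assumption; the driver's actual
 * port creation code in lpfc_init.c does this as part of a larger
 * sequence): the templates above are handed to scsi_host_alloc(), and the
 * private hostdata area they imply is what the entry points above recover
 * via shost->hostdata.
 */
static inline struct Scsi_Host *
lpfc_example_alloc_shost(bool physical_port)
{
	/* lpfc_template for the physical port; lpfc_vport_template for NPIV */
	struct scsi_host_template *sht =
		physical_port ? &lpfc_template : &lpfc_vport_template;

	return scsi_host_alloc(sht, sizeof(struct lpfc_vport));
}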