/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for NVME IOs */
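/*
 * These templates pre-build the WQE words that do not change per IO.
 * lpfc_nvme_prep_io_cmd() copies words from the matching template into
 * each IO's WQE and then fills in only the request-specific fields.
 */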
void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

/**
 * lpfc_nvme_create_queue - Create and initialize an NVME hardware queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @qsize: Requested size of the queue (not used by this driver).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
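	/*
	 * For example, with cfg_nvme_io_channel == 4, IO qidx 1..4 map
	 * to index 0..3 and qidx 5 wraps back to index 0.
	 */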
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			vport->phba->cfg_nvme_io_channel);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "io_channel %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete an NVME hardware queue handle
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free the internal data structures
 * that bind @qidx to its internal IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}

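/* lpfc_nvme_localport_delete
 *
 * @localport: Pointer to the nvme transport local port being deleted.
 *
 * The nvme_fc transport calls this downcall when the unregistration of
 * a previously registered localport has completed.  Wake any thread
 * waiting on the unregister completion.
 */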
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport %p delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport %p\n",
			remoteport);
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}

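/* lpfc_nvme_cmpl_gen_req
 *
 * Completion handler for an NVME LS GEN_REQUEST WQE.  Frees the BPL
 * wrapper saved in context3, invokes the transport's done() callback
 * with the completion status, drops the ndlp reference taken at issue
 * time, and releases the iocbq back to the driver pool.
 */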
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
			 "lsreg:%p bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

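/* lpfc_nvme_gen_req
 *
 * Builds a GEN_REQUEST64 WQE around the caller's BPL (@bmp, @num_entry
 * BDEs) addressed to @ndlp and posts it to the ELS work queue.  A zero
 * @tmo selects the default of 3 * R_A_TOV.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure to allocate or issue the WQE
 */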
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for  command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the request
 * @pnvme_lsreq: The link service request from the nvme_fc transport
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero - Failure
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* There are two dma bufs in the request; actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is in
	 * them. And we do not have to look at the response data, we only care
	 * that we got a response. All of the caring is going to happen in the
	 * nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	/* Need the ndlp.  It is stored in the driver's rport. */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 Remoteport %p, rport has invalid ndlp. "
				 "Failing LS Req\n", pnvme_rport);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not alloc LS request bmp "
				 "for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID,
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&lport->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds.  Shorter timeouts have been
	 * observed to cause connection failures.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		atomic_inc(&lport->xmt_ls_err);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport the request was issued to
 * @pnvme_lsreq: The link service request to abort
 *
 * Driver registers this routine to abort a previously issued link
 * service request to a remote nvme-aware port.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow =  64;  /* Word 16 */

		/* Word 10  - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];  /* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;			/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr   = *dptr;	/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow =  sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
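/* lpfc_nvme_ktime
 *
 * Accumulates per-IO latency statistics for debugfs.  Validates the
 * timestamps recorded on the command, derives the four latency
 * segments described below, and folds each segment into the HBA's
 * running min/max/total counters.
 */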
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_nvme_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;
	uint64_t segsum;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from Last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives an IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
		seg1 = 0;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	segsum = seg2;
	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg4)
		return;
	seg4 -= segsum;

	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput  = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to HBA context object.
 * @pwqeIn: Pointer to the command WQE that completed.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Driver registers this routine as the completion handler for NVME FCP
 * IO.  It translates the WCQE completion status into the transport's
 * expected response and returns the nvme buffer to the driver pool.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd) {
		if (!lpfc_ncmd) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_NODE | LOG_NVME_IOERR,
					 "6071 Null lpfc_ncmd pointer. No "
					 "release, skip completion\n");
			return;
		}

		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
				 "nvmeCmd %p\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
			cstat = &lport->cstat[idx];
			atomic_inc(&cstat->fc4NvmeIoCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_fcp_xb);
				atomic_inc(&lport->cmpl_fcp_err);
			}
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id =  nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NVME_IOERR,
					 "6032 Delay Aborted cmd %p "
					 "nvme cmd %p, xri x%x, "
					 "xb %d\n",
					 lpfc_ncmd, nCmd,
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 bf_get(lpfc_wcqe_c_xb, wcqe));
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* Pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		if (lpfc_ncmd->cpu != smp_processor_id())
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 smp_processor_id(), lpfc_ncmd->cpu);
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		nCmd->done(nCmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: The virtual port issuing the IO.
 * @lpfc_ncmd: The nvme buffer describing the IO.
 * @pnode: The node the IO is destined for.
 * @cstat: Per-queue controller statistics to update.
 *
 * Initializes the WQE in @lpfc_ncmd from the iread, iwrite, or icmnd
 * template and fills in the WQE fields that do not depend on the
 * request's scatter-gather list.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node pointer is invalid.
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_nvme_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			atomic_inc(&cstat->fc4NvmeOutputRequests);
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			atomic_inc(&cstat->fc4NvmeInputRequests);
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		atomic_inc(&cstat->fc4NvmeControlRequests);
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the DMA scatter-gather list for an IO
 * @vport: The virtual port issuing the IO.
 * @lpfc_ncmd: The nvme buffer describing the IO.
 *
 * Adjusts the command and response SGEs, then formats one SGE per data
 * segment supplied by the nvme transport, using a PBDE for the first
 * data segment when that support is enabled.
 *
 * Return value :
 *   0 - Success
 *   1 - The scatter-gather list could not be formatted.
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the IO request
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno - Failure
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Fail IO, ndlp not ready: rport %p "
				  "ndlp %p, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->upcall_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver is now dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	idx = lpfc_queue_info->index;
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &lport->cstat[idx];

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		lpfc_ncmd->cpu = smp_processor_id();
		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
			/* Check for admin queue */
			if (lpfc_queue_info->qidx) {
				lpfc_printf_vlog(vport,
						 KERN_ERR, LOG_NVME_IOERR,
						"6702 CPU Check cmd: "
						"cpu %d wq %d\n",
						lpfc_ncmd->cpu,
						lpfc_queue_info->index);
			}
			lpfc_ncmd->cpu = lpfc_queue_info->index;
		}
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			atomic_dec(&cstat->fc4NvmeOutputRequests);
		else
			atomic_dec(&cstat->fc4NvmeInputRequests);
	} else
		atomic_dec(&cstat->fc4NvmeControlRequests);
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to the abort wcqe completion object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nvme_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	union lpfc_wqe128 *abts_wqe;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer.  Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer.  Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		return;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6136 No available abort wqes. Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
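	/* (Checked above: a second abort request for this IO is rejected
	 * while this flag remains set.)
	 */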

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the lsrqst private data at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
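
/*
 * Note: max_hw_queues and max_sgl_segments above are placeholders; both are
 * overwritten with the configured values in lpfc_nvme_create_localport()
 * before the template is handed to the transport.  A minimal usage sketch
 * (illustration only; the real call site is lpfc_nvme_create_localport()
 * below):
 *
 *	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *					 &vport->phba->pcidev->dev, &localport);
 *
 * The transport then allocates local_priv_sz bytes of lport private data
 * and fcprqst_priv_sz bytes per FCP request on the driver's behalf.
 */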

/**
 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from an
 * NVME buffer list @nblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 **/
static int
lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
			      struct list_head *nblist,
			      int count)
{
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
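	/*
	 * Back-of-envelope capacity (assumption: 4 KiB SLI4_PAGE_SIZE and
	 * 16-byte sgl_page_pairs entries): somewhat over 250 buffers fit in
	 * one non-embedded mailbox command, which is why callers chunk the
	 * list (see LPFC_NEMBED_MBOX_SGL_CNT in lpfc_post_nvme_sgl_list()).
	 */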
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 * must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_nblist, int sb_count)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
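	/*
	 * Accumulate runs of buffers whose xris are consecutive.  A hole in
	 * the xri sequence, or hitting the non-embedded mailbox limit
	 * (LPFC_NEMBED_MBOX_SGL_CNT), flushes the run as one block post.
	 * For example (hypothetical xris), a list of 10, 11, 12, 20 is
	 * posted as the block {10, 11, 12} plus a single sgl post of 20.
	 */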

	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(phba,
						lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put NVME buffers with posted sgls on the NVME buffer list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	while (!list_empty(&nvme_nblist)) {
		list_remove_head(&nvme_nblist, lpfc_ncmd,
				 struct lpfc_nvme_buf, list);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
	}
	return num_posted;
}

/**
 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers that need to be reposted onto a local list */
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
	phba->get_nvme_bufs = 0;
	phba->put_nvme_bufs = 0;
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
						phba->sli4_hba.nvme_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for a device with SLI-4 interface spec.
 * The nvme buffer contains all the necessary information needed to initiate
 * an NVME I/O. After allocating up to @num_to_alloc NVME buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of nvme buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6121 Failed to allocated IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->iocb_flag |= LPFC_IO_NVME;
		pwqeq->context1 = lpfc_ncmd;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
		sgl = lpfc_ncmd->nvme_sgl;
		pdma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;


		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt++;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_post_nvme_sgl_list(phba,
						     &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

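/*
 * lpfc_nvme_buf - remove the first nvme buffer from the get list
 *
 * Caller must hold nvme_buf_list_get_lock.
 */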
static inline struct lpfc_nvme_buf *
lpfc_nvme_buf(struct lpfc_hba *phba)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &phba->lpfc_nvme_buf_list_get, list) {
		list_del_init(&lpfc_ncmd->list);
		phba->get_nvme_bufs--;
		return lpfc_ncmd;
	}
	return NULL;
}

/**
 * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: The node the IO is destined for; used for qdepth accounting.
 * @expedite: Nonzero allows the reserved buffer pool to be used.
 *
 * This routine removes an nvme buffer from head of @phba lpfc_nvme_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int expedite)
{
	struct lpfc_nvme_buf *lpfc_ncmd = NULL;
	unsigned long iflag = 0;
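
	/*
	 * Two-list scheme: submitters allocate from the get list while
	 * completions return buffers to the put list; the put list is only
	 * spliced over (under both locks) when the get list runs low.  This
	 * keeps the submit and completion paths off a single hot lock.  A
	 * reserve of LPFC_NVME_EXPEDITE_XRICNT buffers is held back for
	 * expedite requests.
	 */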

	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
	if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
		lpfc_ncmd = lpfc_nvme_buf(phba);
	if (!lpfc_ncmd) {
		spin_lock(&phba->nvme_buf_list_put_lock);
		list_splice(&phba->lpfc_nvme_buf_list_put,
			    &phba->lpfc_nvme_buf_list_get);
		phba->get_nvme_bufs += phba->put_nvme_bufs;
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
		phba->put_nvme_bufs = 0;
		spin_unlock(&phba->nvme_buf_list_put_lock);
		if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_ncmd = lpfc_nvme_buf(phba);
	}
	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
	}
	return  lpfc_ncmd;
}

/**
 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
 * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->nonsg_phys = 0;
	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;

	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
					iflag);
		list_add_tail(&lpfc_ncmd->list,
			&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
					iflag);
	} else {
		lpfc_ncmd->nvmeCmd = NULL;
		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
		phba->put_nvme_bufs++;
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport - the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport.  It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time.  Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba  *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	int len, i;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;

	cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
			phba->cfg_nvme_io_channel), GFP_KERNEL);
	if (!cstat)
		return -ENOMEM;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
#if (IS_ENABLED(CONFIG_NVME_FC))
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
#else
	ret = -ENOMEM;
#endif
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP %p, private %p, "
				 "sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		lport->cstat = cstat;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);
		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);

		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
			cstat = &lport->cstat[i];
			atomic_set(&cstat->fc4NvmeInputRequests, 0);
			atomic_set(&cstat->fc4NvmeOutputRequests, 0);
			atomic_set(&cstat->fc4NvmeControlRequests, 0);
			atomic_set(&cstat->fc4NvmeIoCmpls, 0);
		}

		/* Don't post more new bufs if repost already recovered
		 * the nvme sgls.
		 */
		if (phba->sli4_hba.nvme_xri_cnt == 0) {
			len  = lpfc_new_nvme_buf(vport,
						 phba->sli4_hba.nvme_xri_max);
			vport->phba->total_nvme_bufs += len;
		}
	} else {
		kfree(cstat);
	}

	return ret;
}

/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	u32 wait_tmo;
	int ret;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
						  wait_tmo);
		if (unlikely(!ret)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6176 Lport %p Localport %p wait "
					 "timed out. Renewing.\n",
					 lport, vport->localport);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport %p Localport %p Complete Success\n",
			 lport, vport->localport);
#endif
}

/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to the lpfc vport data structure.
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 *
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	int ret;

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	vport->localport = NULL;
	lport = (struct lpfc_nvme_lport *)localport->private;
	cstat = lport->cstat;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport %p\n",
			 localport);

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	init_completion(&lport->lport_unreg_done);
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport);
	kfree(cstat);

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support.  All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}

void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP %p, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport %p did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
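	/* Assumption: a zero FCID means this vport has not completed fabric
	 * login, so only the discovery role is advertised until an address
	 * is assigned.
	 */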
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport %p to DID x%06x\n",
			 lport, localport->port_id);
#endif
}

int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance.  Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults.  The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);

	spin_lock_irq(&vport->phba->hbalock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	spin_unlock_irq(&vport->phba->hbalock);
	if (!oldrport)
		lpfc_nlp_get(ndlp);

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport.  Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);
		rport = remote_port->private;
		if (oldrport) {
			/* New remoteport record does not guarantee valid
			 * host private memory area.
			 */
			prev_ndlp = oldrport->ndlp;
			if (oldrport == remote_port->private) {
				/* Same remoteport - ndlp should match.
				 * Just reuse.
				 */
				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
						 LOG_NVME_DISC,
						 "6014 Rebinding lport to "
						 "remoteport %p wwpn 0x%llx, "
						 "Data: x%x x%x %p %p x%x x%06x\n",
						 remote_port,
						 remote_port->port_name,
						 remote_port->port_id,
						 remote_port->port_role,
						 prev_ndlp,
						 ndlp,
						 ndlp->nlp_type,
						 ndlp->nlp_DID);
				return 0;
			}

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&vport->phba->hbalock);
			ndlp->nrport = NULL;
			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&vport->phba->hbalock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
				    (!prev_ndlp->nrport))
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->nrport = rport;
		spin_unlock_irq(&vport->phba->hbalock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Binding new rport to "
				 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NVME_DISC | LOG_NODE,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}

/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.  Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss.  Node ref count
 * is only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is a fundamental error.  The localport is always
	 * available until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&vport->phba->hbalock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport %p, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type);

	/* Sanity check ndlp type.  Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored.  The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);
		if (ret != 0) {
			lpfc_nlp_put(ndlp);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
			 "6168 State error: lport %p, rport %p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&lpfc_ncmd->list);
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_nvme_buf_list_lock);

			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp)
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6311 nvme_cmd %p xri x%x tag x%x "
					"abort complete and xri released\n",
					lpfc_ncmd->nvmeCmd, xri,
					lpfc_ncmd->cur_iocbq.iotag);

			/* Aborted NVME commands are required to not complete
			 * before the abort exchange command fully completes.
			 * Once completed, it is available via the put list.
			 */
			if (lpfc_ncmd->nvmeCmd) {
				nvme_cmd = lpfc_ncmd->nvmeCmd;
				nvme_cmd->done(nvme_cmd);
				lpfc_ncmd->nvmeCmd = NULL;
			}
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6312 XRI Aborted xri x%x not found\n", xri);

}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring  *pring;
	u32 i, wait_cnt = 0;


	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
		return;

	/* Cycle through all NVME rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10mS.  Every ten seconds,
			 * dump a message.  Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}
}