Commit 2b7824d0 authored by James Smart, committed by Christoph Hellwig

Fix driver load issues when MRQ=8

The symptom is that the driver will fail to log in to the fabric because it
is out of iocb resources.

There is a one-to-one relationship between MRQs (receive buffers for
NVMET-FC) and iocbs, and the default number of iocbs was not accounting
for the number of MRQs being created.

This fix adds the MRQ resources into the total iocb count so that the
driver can still handle fabric events when needed.
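
For illustration, a minimal sketch of the new sizing rule follows. The
helper name and the example numbers are hypothetical; only the cfg_* field
names and the "* 1024" scaling come from the patch below.

	/*
	 * Hypothetical helper sketching the sizing rule; not part of the
	 * patch. Example numbers are illustrative, not driver defaults.
	 */
	static int lpfc_total_iocb_cnt(int cfg_iocb_cnt, int nvmet_support,
				       int cfg_nvmet_mrq_post, int cfg_nvmet_mrq)
	{
		int cnt = cfg_iocb_cnt * 1024;	/* base pool, e.g. 2 * 1024 = 2048 */

		if (nvmet_support)		/* one iocb per posted MRQ receive buffer */
			cnt += cfg_nvmet_mrq_post * cfg_nvmet_mrq;	/* e.g. 512 * 8 = 4096 */

		return cnt;			/* e.g. 6144 handed to lpfc_init_iocb_list() */
	}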

Also moved the initialization of ctxlock so that it is done on FCP
commands, not LS commands, and modified log messages so that the log
output can be correlated with the analyzer trace.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Parent 59c6e13e
@@ -11061,7 +11061,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host *shost = NULL;
-	int error;
+	int error, cnt;
 	uint32_t cfg_mode, intr_mode;
 
 	/* Allocate memory for HBA structure */
@@ -11095,11 +11095,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_unset_pci_mem_s4;
 	}
 
+	cnt = phba->cfg_iocb_cnt * 1024;
+	if (phba->nvmet_support)
+		cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
+
 	/* Initialize and populate the iocb list per host */
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2821 initialize iocb list %d.\n",
-			phba->cfg_iocb_cnt*1024);
-	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
+			"2821 initialize iocb list %d total %d\n",
+			phba->cfg_iocb_cnt, cnt);
+	error = lpfc_init_iocb_list(phba, cnt);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1495,6 +1495,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 				 "io buffer. Skipping abort req.\n");
 		return;
 	}
+	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
 
 	/*
 	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1508,20 +1509,19 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6143 NVME req mismatch: "
 				 "lpfc_nbuf %p nvmeCmd %p, "
-				 "pnvme_fcreq %p. Skipping Abort\n",
+				 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
 				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
-				 pnvme_fcreq);
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
 		return;
 	}
 
 	/* Don't abort IOs no longer on the pending queue. */
-	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
 	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6142 NVME IO req %p not queued - skipping "
-				 "abort req\n",
-				 pnvme_fcreq);
+				 "abort req xri x%x\n",
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
 		return;
 	}
@@ -1535,8 +1535,9 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6144 Outstanding NVME I/O Abort Request "
 				 "still pending on nvme_fcreq %p, "
-				 "lpfc_ncmd %p\n",
-				 pnvme_fcreq, lpfc_nbuf);
+				 "lpfc_ncmd %p xri x%x\n",
+				 pnvme_fcreq, lpfc_nbuf,
+				 nvmereq_wqe->sli4_xritag);
 		return;
 	}
@@ -1545,8 +1546,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6136 No available abort wqes. Skipping "
-				 "Abts req for nvme_fcreq %p.\n",
-				 pnvme_fcreq);
+				 "Abts req for nvme_fcreq %p xri x%x\n",
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
 		return;
 	}
@@ -1604,7 +1605,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	}
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-			 "6138 Transport Abort NVME Request Issued for\n"
+			 "6138 Transport Abort NVME Request Issued for "
 			 "ox_id x%x on reqtag x%x\n",
 			 nvmereq_wqe->sli4_xritag,
 			 abts_buf->iotag);
@@ -2491,7 +2492,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 input_err:
 #endif
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-			 "6168: State error: lport %p, rport%p FCID x%06x\n",
+			 "6168 State error: lport %p, rport%p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
 }
@@ -872,7 +872,6 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	ctxp->wqeq = NULL;
 	ctxp->state = LPFC_NVMET_STE_RCV;
 	ctxp->rqb_buffer = (void *)nvmebuf;
-	spin_lock_init(&ctxp->ctxlock);
 
 	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
 			 oxid, size, sid);
@@ -981,6 +980,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	ctxp->rqb_buffer = nvmebuf;
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
+	spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on) {
@@ -1003,8 +1003,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	}
 #endif
 
-	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n",
-			 oxid, size, sid);
+	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
+			 oxid, size, smp_processor_id());
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
 	/*