提交 cecb3540 编写于 作者: L Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Driver fixes and one core patch.

  Nine of the driver patches are minor fixes and reworks to lpfc and the
  rest are trivial and minor fixes elsewhere"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: pmcraid: Fix missing resource cleanup in error case
  scsi: ipr: Fix missing/incorrect resource cleanup in error case
  scsi: mpt3sas: Fix out-of-bounds compiler warning
  scsi: lpfc: Update lpfc version to 14.2.0.4
  scsi: lpfc: Allow reduced polling rate for nvme_admin_async_event cmd completion
  scsi: lpfc: Add more logging of cmd and cqe information for aborted NVMe cmds
  scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology
  scsi: lpfc: Resolve NULL ptr dereference after an ELS LOGO is aborted
  scsi: lpfc: Address NULL pointer dereference after starget_to_rport()
  scsi: lpfc: Resolve some cleanup issues following SLI path refactoring
  scsi: lpfc: Resolve some cleanup issues following abort path refactoring
  scsi: lpfc: Correct BDE type for XMIT_SEQ64_WQE in lpfc_ct_reject_event()
  scsi: vmw_pvscsi: Expand vcpuHint to 16 bits
  scsi: sd: Fix interpretation of VPD B9h length
...@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) ...@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL); GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) { if (!ioa_cfg->hrrq[i].host_rrq) {
while (--i > 0) while (--i >= 0)
dma_free_coherent(&pdev->dev, dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size, sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq, ioa_cfg->hrrq[i].host_rrq,
...@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, ...@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc, ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]); &ioa_cfg->hrrq[i]);
if (rc) { if (rc) {
while (--i >= 0) while (--i > 0)
free_irq(pci_irq_vector(pdev, i), free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]); &ioa_cfg->hrrq[i]);
return rc; return rc;
......
...@@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, ...@@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t); uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *); struct lpfc_iocbq *);
void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
...@@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, ...@@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp); struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl); struct lpfc_iocbq *rspiocb);
void lpfc_create_multixri_pools(struct lpfc_hba *phba); void lpfc_create_multixri_pools(struct lpfc_hba *phba);
void lpfc_create_destroy_pools(struct lpfc_hba *phba); void lpfc_create_destroy_pools(struct lpfc_hba *phba);
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
......
...@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, ...@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
memset(bpl, 0, sizeof(struct ulp_bde64)); memset(bpl, 0, sizeof(struct ulp_bde64));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64; bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl->tus.w = le32_to_cpu(bpl->tus.w);
......
...@@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status, ndlp->nlp_DID, ulp_status,
ulp_word4); ulp_word4);
/* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
if (lpfc_error_lost_link(ulp_status, ulp_word4)) { if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
skip_recovery = 1; skip_recovery = 1;
goto out; goto out;
} }
...@@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock); spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM); NLP_EVT_DEVICE_RM);
lpfc_els_free_iocb(phba, cmdiocb); goto out_rsrc_free;
lpfc_nlp_put(ndlp);
/* Presume the node was released. */
return;
} }
out: out:
/* Driver is done with the IO. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
/* At this point, the LOGO processing is complete. NOTE: For a /* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change * pt2pt topology, we are assuming the NPortID will only change
* on link up processing. For a LOGO / PLOGI initiated by the * on link up processing. For a LOGO / PLOGI initiated by the
...@@ -3059,6 +3048,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -3059,6 +3048,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status, ndlp->nlp_DID, ulp_status,
ulp_word4, tmo, ulp_word4, tmo,
vport->num_disc_nodes); vport->num_disc_nodes);
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
lpfc_disc_start(vport); lpfc_disc_start(vport);
return; return;
} }
...@@ -3075,6 +3068,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -3075,6 +3068,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM); NLP_EVT_DEVICE_RM);
} }
out_rsrc_free:
/* Driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
} }
/** /**
......
...@@ -4487,6 +4487,9 @@ struct wqe_common { ...@@ -4487,6 +4487,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6 #define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001 #define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11 #define wqe_sup_WORD word11
#define wqe_ffrq_SHIFT 6
#define wqe_ffrq_MASK 0x00000001
#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7 #define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001 #define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11 #define wqe_wqec_WORD word11
......
...@@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) ...@@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
rc = pci_enable_msi(phba->pcidev); rc = pci_enable_msi(phba->pcidev);
if (!rc) if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0462 PCI enable MSI mode success.\n"); "0012 PCI enable MSI mode success.\n");
else { else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc); "0471 PCI enable MSI mode failed (%d)\n", rc);
......
...@@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp); lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) { if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC) if (vport->port_state <= LPFC_FDISC ||
vport->fc_flag & FC_PT2PT)
goto out; goto out;
lpfc_linkdown_port(vport); lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
......
...@@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, ...@@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = wcqe->parameter; nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0; nCmd->status = 0;
/* Get the NVME cmd details for this unique error. */
cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
/* Check if this is really an ERSP */ /* Check if this is really an ERSP */
if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->status = IOSTAT_SUCCESS;
lpfc_ncmd->result = 0; lpfc_ncmd->result = 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6084 NVME Completion ERSP: " "6084 NVME FCP_ERR ERSP: "
"xri %x placed x%x\n", "xri %x placed x%x opcode x%x cmd_id "
lpfc_ncmd->cur_iocbq.sli4_xritag, "x%x cqe_status x%x\n",
wcqe->total_data_placed); lpfc_ncmd->cur_iocbq.sli4_xritag,
wcqe->total_data_placed,
cp->sqe.common.opcode,
cp->sqe.common.command_id,
ep->cqe.status);
break; break;
} }
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: " "6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x " "xri %x status x%x result x%x "
"placed x%x\n", "placed x%x opcode x%x cmd_id x%x, "
"cqe_status x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result, lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed); wcqe->total_data_placed,
cp->sqe.common.opcode,
cp->sqe.common.command_id,
ep->cqe.status);
break; break;
case IOSTAT_LOCAL_REJECT: case IOSTAT_LOCAL_REJECT:
/* Let fall through to set command final state. */ /* Let fall through to set command final state. */
...@@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, ...@@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{ {
struct lpfc_hba *phba = vport->phba; struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); struct nvme_common_command *sqe;
struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe; union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len; uint32_t req_len;
...@@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, ...@@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++; cstat->control_requests++;
} }
if (pnode->nlp_nvme_info & NLP_NVME_NSLER) if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1); bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
sqe = &((struct nvme_fc_cmd_iu *)
nCmd->cmdaddr)->sqe.common;
if (sqe->opcode == nvme_admin_async_event)
bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
}
/* /*
* Finish initializing those WQE fields that are independent * Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer * of the nvme_cmnd request_buffer
...@@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, ...@@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object * @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object. * @cmdiocb: Pointer to command iocb object.
* @abts_cmpl: Pointer to wcqe complete object. * @rspiocb: Pointer to response iocb object.
* *
* This is the callback function for any NVME FCP IO that was aborted. * This is the callback function for any NVME FCP IO that was aborted.
* *
...@@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, ...@@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
**/ **/
void void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl) struct lpfc_iocbq *rspiocb)
{ {
struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME, lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x " "6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x " "original iotag x%x, abort cmd iotag x%x "
...@@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ...@@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags; unsigned long flags;
int ret_val; int ret_val;
struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does /* Validate pointers. LLDD fault handling with transport does
* have timing races. * have timing races.
...@@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ...@@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return; return;
} }
/*
* Get Command Id from cmd to plug into response. This
* code is not needed in the next NVME Transport drop.
*/
cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for " "6138 Transport Abort NVME Request Issued for "
"ox_id x%x\n", "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
nvmereq_wqe->sli4_xritag); nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
cp->sqe.common.command_id);
return; return;
out_unlock: out_unlock:
......
...@@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) ...@@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status; int status;
u32 logit = LOG_FCP; u32 logit = LOG_FCP;
if (!rport)
return FAILED;
rdata = rport->dd_data; rdata = rport->dd_data;
if (!rdata || !rdata->pnode) { if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
...@@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) ...@@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned long flags; unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
if (!rport)
return FAILED;
rdata = rport->dd_data; rdata = rport->dd_data;
if (!rdata || !rdata->pnode) { if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
......
...@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) ...@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
sync_buf = __lpfc_sli_get_iocbq(phba); sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) { if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
"6213 No available WQEs for CMF_SYNC_WQE\n"); "6244 No available WQEs for CMF_SYNC_WQE\n");
ret_val = ENOMEM; ret_val = ENOMEM;
goto out_unlock; goto out_unlock;
} }
...@@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ...@@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
set_job_ulpword4(cmdiocbp, set_job_ulpword4(cmdiocbp,
IOERR_ABORT_REQUESTED); IOERR_ABORT_REQUESTED);
/* /*
* For SLI4, irsiocb contains * For SLI4, irspiocb contains
* NO_XRI in sli_xritag, it * NO_XRI in sli_xritag, it
* shall not affect releasing * shall not affect releasing
* sgl (xri) process. * sgl (xri) process.
...@@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ...@@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} }
} }
} }
(cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq); cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
} else } else
lpfc_sli_release_iocbq(phba, cmdiocbp); lpfc_sli_release_iocbq(phba, cmdiocbp);
} else { } else {
...@@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, ...@@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (cmdiocbq->cmd_cmpl) { if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
(cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
&rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag); spin_lock_irqsave(&phba->hbalock, iflag);
} }
break; break;
...@@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, ...@@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* @flag: Flag indicating if this command can be put into txq. * @flag: Flag indicating if this command can be put into txq.
* *
* __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
* send an iocb command to an HBA with SLI-4 interface spec. * send an iocb command to an HBA with SLI-3 interface spec.
* *
* This function takes the hbalock before invoking the lockless version. * This function takes the hbalock before invoking the lockless version.
* The function will return success after it successfully submit the wqe to * The function will return success after it successfully submit the wqe to
...@@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, ...@@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
cmdiocbq->wait_cmd_cmpl = NULL; cmdiocbq->wait_cmd_cmpl = NULL;
if (cmdiocbq->cmd_cmpl) if (cmdiocbq->cmd_cmpl)
(cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL); cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
else else
lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_sli_release_iocbq(phba, cmdiocbq);
return; return;
...@@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, ...@@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
/* Set the exchange busy flag for task management commands */ /* Set the exchange busy flag for task management commands */
if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
!(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq); cur_iocbq);
if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY; lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else else
...@@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) ...@@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
* @irspiocbq: Pointer to work-queue completion queue entry. * @irspiocbq: Pointer to work-queue completion queue entry.
* *
* This routine handles an ELS work-queue completion event and construct * This routine handles an ELS work-queue completion event and construct
* a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle. * discovery engine to handle.
* *
* Return: Pointer to the receive IOCBQ, NULL otherwise. * Return: Pointer to the receive IOCBQ, NULL otherwise.
...@@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, ...@@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) { if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
spin_lock_irqsave(&phba->hbalock, iflags); spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(&phba->hbalock, iflags); spin_unlock_irqrestore(&phba->hbalock, iflags);
} }
...@@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, ...@@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Pass the cmd_iocb and the wcqe to the upper layer */ /* Pass the cmd_iocb and the wcqe to the upper layer */
memcpy(&cmdiocbq->wcqe_cmpl, wcqe, memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
sizeof(struct lpfc_wcqe_complete)); sizeof(struct lpfc_wcqe_complete));
(cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq); cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
} else { } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function " "0375 FCP cmdiocb not callback function "
...@@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, ...@@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
/* Free iocb created in lpfc_prep_seq */ /* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb, list_for_each_entry_safe(curr_iocb, next_iocb,
&iocbq->list, list) { &iocbq->list, list) {
list_del_init(&curr_iocb->list); list_del_init(&curr_iocb->list);
lpfc_sli_release_iocbq(phba, curr_iocb); lpfc_sli_release_iocbq(phba, curr_iocb);
} }
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "14.2.0.3" #define LPFC_DRIVER_VERSION "14.2.0.4"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */ /* Used for SLI 2/3 */
......
...@@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ...@@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply; Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
u16 depth;
int sz; int sz;
int rc = 0; int rc = 0;
...@@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ...@@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
goto out; goto out;
/* sas iounit page 1 */ /* sas iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
if (!sas_iounit_pg1) { if (!sas_iounit_pg1) {
pr_err("%s: failure at %s:%d/%s()!\n", pr_err("%s: failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__); ioc->name, __FILE__, __LINE__, __func__);
...@@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ...@@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
ioc->name, __FILE__, __LINE__, __func__); ioc->name, __FILE__, __LINE__, __func__);
goto out; goto out;
} }
ioc->max_wideport_qd =
(le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
MPT3SAS_SAS_QUEUE_DEPTH;
ioc->max_narrowport_qd = depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
(le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
MPT3SAS_SAS_QUEUE_DEPTH; depth = sas_iounit_pg1->SATAMaxQDepth;
ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
/* pcie iounit page 1 */ /* pcie iounit page 1 */
rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
&pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
......
...@@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) ...@@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0; return 0;
out_unwind: out_unwind:
while (--i > 0) while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
return rc; return rc;
......
...@@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp) ...@@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp)
goto out; goto out;
/* We must have at least a 64B header and one 32B range descriptor */ /* We must have at least a 64B header and one 32B range descriptor */
vpd_len = get_unaligned_be16(&buffer[2]) + 3; vpd_len = get_unaligned_be16(&buffer[2]) + 4;
if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
sd_printk(KERN_ERR, sdkp, sd_printk(KERN_ERR, sdkp,
"Invalid Concurrent Positioning Ranges VPD page\n"); "Invalid Concurrent Positioning Ranges VPD page\n");
......
...@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc { ...@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
u8 tag; u8 tag;
u8 bus; u8 bus;
u8 target; u8 target;
u8 vcpuHint; u16 vcpuHint;
u8 unused[59]; u8 unused[58];
} __packed; } __packed;
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册