提交 e268d708 编写于 作者: Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Three fixes, all in drivers.

  The ufs and qedi fixes are minor; the lpfc one is a bit bigger because
  it involves adding a heuristic to detect and deal with common but not
  standards compliant behaviour"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ufs: core: Fix divide by zero in ufshcd_map_queues()
  scsi: lpfc: Fix pt2pt NVMe PRLI reject LOGO loop
  scsi: qedi: Fix ABBA deadlock in qedi_process_tmf_resp() and qedi_process_cmd_cleanup_resp()
@@ -592,6 +592,7 @@ struct lpfc_vport {
 #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
 #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
 #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
 #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
 #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
 #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
......
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
 	pmboxq->u.mb.mbxOwner = OWN_HOST;
+	if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+		vport->fc_flag &= ~FC_PT2PT_NO_NVME;
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 	if ((mbxstatus == MBX_SUCCESS) &&
......
@@ -1072,7 +1072,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		/* FLOGI failed, so there is no fabric */
 		spin_lock_irq(shost->host_lock);
-		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+				    FC_PT2PT_NO_NVME);
 		spin_unlock_irq(shost->host_lock);
 		/* If private loop, then allow max outstanding els to be
@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		/* Added for Vendor specifc support
 		 * Just keep retrying for these Rsn / Exp codes
 		 */
+		if ((vport->fc_flag & FC_PT2PT) &&
+		    cmd == ELS_CMD_NVMEPRLI) {
+			switch (stat.un.b.lsRjtRsnCode) {
+			case LSRJT_UNABLE_TPC:
+			case LSRJT_INVALID_CMD:
+			case LSRJT_LOGICAL_ERR:
+			case LSRJT_CMD_UNSUPPORTED:
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+						 "0168 NVME PRLI LS_RJT "
+						 "reason %x port doesn't "
+						 "support NVME, disabling NVME\n",
+						 stat.un.b.lsRjtRsnCode);
+				retry = 0;
+				vport->fc_flag |= FC_PT2PT_NO_NVME;
+				goto out_retry;
+			}
+		}
 		switch (stat.un.b.lsRjtRsnCode) {
 		case LSRJT_UNABLE_TPC:
 			/* The driver has a VALID PLOGI but the rport has
......
@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 			 * is configured try it.
 			 */
 			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
-			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+			     vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 				/* We need to update the localport also */
 				lpfc_nvme_update_localport(vport);
......
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 			qedi_cmd->list_tmf_work = NULL;
 		}
 	}
+	spin_unlock_bh(&qedi_conn->tmf_work_lock);
-	if (!found) {
-		spin_unlock_bh(&qedi_conn->tmf_work_lock);
+	if (!found)
 		goto check_cleanup_reqs;
-	}
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 		  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 	qedi_cmd->state = CLEANUP_RECV;
 unlock:
 	spin_unlock_bh(&conn->session->back_lock);
-	spin_unlock_bh(&qedi_conn->tmf_work_lock);
 	wake_up_interruptible(&qedi_conn->wait_queue);
 	return;
......
@@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
 			break;
 		case HCTX_TYPE_READ:
 			map->nr_queues = 0;
-			break;
+			continue;
 		default:
 			WARN_ON_ONCE(true);
 		}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册