Commit a789241e authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Fix NMI crash during rmmod due to circular hbalock dependency

Remove hbalock dependency for lpfc_abts_els_sgl_list and
lpfc_abts_nvmet_ctx_list.  The lists are adequately synchronized with the
sgl_list_lock and abts_nvmet_buf_list_lock.

Link: https://lore.kernel.org/r/20210412013127.2387-5-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Parent f866eb06
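The "circular hbalock dependency" in the subject is the classic AB-BA inversion: the paths patched below nested hbalock around the per-list locks, so any code path that could ever want the same two locks in the opposite order could deadlock against them. The sketch below is a minimal userspace illustration of that hazard and of the fix (give the list exactly one lock of its own), using pthread mutexes in place of kernel spinlocks; every name in it is invented for the example and none of it is lpfc code.

/*
 * Minimal userspace sketch (not lpfc code) of the pattern this patch
 * removes.  pthread mutexes stand in for kernel spinlocks and all
 * names are invented.  Build: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* plays hbalock */
static pthread_mutex_t list_lock  = PTHREAD_MUTEX_INITIALIZER; /* plays sgl_list_lock */
static int list_len;                                           /* plays the abts list */

/* Old style: every walker nests outer_lock around list_lock, creating a
 * lock-ordering dependency.  Any path that ever takes the same two locks
 * in the opposite order can deadlock against this one (AB-BA). */
static void *walk_old(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&list_lock);
	list_len++;
	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&outer_lock);
	return NULL;
}

/* Fixed style: the list is adequately protected by its own lock, so the
 * ordering dependency on outer_lock disappears entirely. */
static void *walk_new(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&list_lock);
	list_len++;
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, walk_old, NULL);
	pthread_create(&t2, NULL, walk_new, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("list_len = %d\n", list_len);
	return 0;
}

With the nesting gone, each list is guarded by exactly one lock, which is why the hunks below also move the interrupt-disabling duty onto the list locks themselves: the bare spin_lock() calls become spin_lock_irqsave() or spin_lock_irq().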
@@ -10072,8 +10072,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
@@ -10081,8 +10080,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
 			sglq_entry->ndlp = NULL;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
 	return;
 }
@@ -10109,8 +10107,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 	pring = lpfc_phba_elsring(phba);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
@@ -10120,8 +10117,8 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			list_add_tail(&sglq_entry->list,
 				      &phba->sli4_hba.lpfc_els_sgl_list);
 			sglq_entry->state = SGL_FREED;
-			spin_unlock(&phba->sli4_hba.sgl_list_lock);
-			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+					       iflag);
 
 			if (ndlp) {
 				lpfc_set_rrq_active(phba, ndlp,
@@ -10136,21 +10133,18 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			return;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
 	lxri = lpfc_sli4_xri_inrange(phba, xri);
-	if (lxri == NO_XRI) {
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (lxri == NO_XRI)
 		return;
-	}
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return;
 	}
 	sglq_entry->state = SGL_XRI_ABORTED;
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
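One detail worth noting in the tail of lpfc_sli4_els_xri_aborted above: hbalock is still taken, but now only around __lpfc_get_active_sglq(). The double-underscore prefix is the usual kernel convention for a helper that assumes its caller already holds the relevant lock, with a locked wrapper provided for everyone else. A hypothetical userspace sketch of that convention (all names invented, not lpfc code):

/* Sketch of the kernel's "__helper assumes the lock is held" naming
 * convention.  Build: cc -pthread convention.c */
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *active_table[16];

/* Double-underscore variant: caller must already hold table_lock. */
static void *__get_active(int idx)
{
	return active_table[idx];
}

/* Locked wrapper for callers that do not hold the lock themselves. */
static void *get_active(int idx)
{
	void *p;

	pthread_mutex_lock(&table_lock);
	p = __get_active(idx);
	pthread_mutex_unlock(&table_lock);
	return p;
}

int main(void)
{
	active_table[0] = active_table;	/* any non-NULL entry */
	return get_active(0) ? 0 : 1;
}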
......
@@ -1043,12 +1043,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	 * driver is unloading or reposted if the driver is restarting
 	 * the port.
 	 */
-	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
-					/* scsl_buf_list */
+
 	/* sgl_list_lock required because worker thread uses this
 	 * list.
 	 */
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 	list_for_each_entry(sglq_entry,
 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
 		sglq_entry->state = SGL_FREED;
@@ -1057,11 +1056,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 		&phba->sli4_hba.lpfc_els_sgl_list);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 
 	/* abts_xxxx_buf_list_lock required because worker thread uses this
 	 * list.
 	 */
+	spin_lock_irq(&phba->hbalock);
 	cnt = 0;
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		qp = &phba->sli4_hba.hdwq[idx];
@@ -3804,12 +3804,10 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 			sglq_entry->state = SGL_FREED;
 			list_add_tail(&sglq_entry->list, &els_sgl_list);
 		}
-		spin_lock_irq(&phba->hbalock);
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 		list_splice_init(&els_sgl_list,
 				 &phba->sli4_hba.lpfc_els_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 		/* els xri-sgl shrinked */
 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3817,8 +3815,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 				"3158 ELS xri-sgl count decreased from "
 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
 				els_xri_cnt);
-		spin_lock_irq(&phba->hbalock);
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
 				 &els_sgl_list);
 		/* release extra els sgls from list */
@@ -3833,8 +3830,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 		}
 		list_splice_init(&els_sgl_list,
 				 &phba->sli4_hba.lpfc_els_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 	} else
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"3163 ELS xri-sgl count unchanged: %d\n",
@@ -7388,11 +7384,9 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
 	LIST_HEAD(sglq_list);
 
 	/* Retrieve all els sgls from driver list */
-	spin_lock_irq(&phba->hbalock);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 
 	/* Now free the sgl list */
 	lpfc_free_sgl_list(phba, &sglq_list);
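A side note on the two locking flavors visible in the hunks above: the teardown and update paths (process context, interrupts known to be enabled) use spin_lock_irq()/spin_unlock_irq(), while the abort handlers elsewhere in this patch use spin_lock_irqsave()/spin_unlock_irqrestore() because they cannot assume the interrupt state of their caller. A rough userspace analogy, illustration only, with POSIX signal masking standing in for interrupt disabling:

/* Userspace analogy of _irq vs _irqsave.  Signals play interrupts. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t all, saved;

	sigfillset(&all);

	/* "irqsave": save the current mask, block, then restore exactly
	 * what was there before -- safe whatever the caller's state. */
	sigprocmask(SIG_BLOCK, &all, &saved);	/* spin_lock_irqsave()      */
	puts("critical section, prior state preserved");
	sigprocmask(SIG_SETMASK, &saved, NULL);	/* spin_unlock_irqrestore() */

	/* "irq": unconditionally block then unblock; only correct when
	 * we know nothing was blocked on entry. */
	sigprocmask(SIG_BLOCK, &all, NULL);	/* spin_lock_irq()   */
	puts("critical section, assumes signals were unblocked");
	sigprocmask(SIG_UNBLOCK, &all, NULL);	/* spin_unlock_irq() */
	return 0;
}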
......
@@ -1440,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
 		list_del_init(&ctx_buf->list);
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 
+		spin_lock(&phba->hbalock);
 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+		spin_unlock(&phba->hbalock);
+
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
@@ -1787,8 +1790,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
 	}
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 			&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 			list) {
@@ -1806,10 +1808,10 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		}
 		ctxp->flag &= ~LPFC_NVME_XBUSY;
 		spin_unlock(&ctxp->ctxlock);
-		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+				       iflag);
 
 		rrq_empty = list_empty(&phba->active_rrq_list);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
 		if (ndlp &&
 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -1830,9 +1832,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 			lpfc_worker_wake_up(phba);
 		return;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
 	if (ctxp) {
 		/*
@@ -1876,8 +1876,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 			&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 			list) {
@@ -1886,9 +1885,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
-		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+				       iflag);
 
 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
 		ctxp->flag |= LPFC_NVME_ABTS_RCV;
 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -1907,9 +1905,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
 		return 0;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	/* check the wait list */
 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
 		struct rqb_dmabuf *nvmebuf;
......
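Finally, this class of bug is exactly what the kernel's lockdep infrastructure is designed to flag: it records the order in which each pair of locks is first acquired and warns when a later acquisition completes a cycle. A toy, self-contained version of that idea, invented for illustration (this is not the lockdep API):

/* Toy lock-ordering checker in the spirit of lockdep. */
#include <stdio.h>

enum { HBALOCK, SGL_LIST_LOCK, NLOCKS };
static const char *lock_name[NLOCKS] = { "hbalock", "sgl_list_lock" };
static int held[NLOCKS];           /* lock currently held?                  */
static int before[NLOCKS][NLOCKS]; /* before[a][b]: a was once taken before b */

static void take(int l)
{
	for (int h = 0; h < NLOCKS; h++) {
		if (!held[h])
			continue;
		if (before[l][h]) /* l preceded h elsewhere: cycle completed */
			printf("inversion: %s taken after %s\n",
			       lock_name[l], lock_name[h]);
		before[h][l] = 1; /* record h -> l ordering */
	}
	held[l] = 1;
}

static void drop(int l)
{
	held[l] = 0;
}

int main(void)
{
	/* Old lpfc paths: hbalock first, then sgl_list_lock. */
	take(HBALOCK); take(SGL_LIST_LOCK);
	drop(SGL_LIST_LOCK); drop(HBALOCK);

	/* Any path with the opposite order completes the cycle. */
	take(SGL_LIST_LOCK); take(HBALOCK); /* prints the inversion */
	drop(HBALOCK); drop(SGL_LIST_LOCK);
	return 0;
}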