Commit 678ea9b5 authored by Steve Wise, committed by Roland Dreier

RDMA/cxgb4: Only call CQ completion handler if it is armed

The function __flush_qp() always calls the ULP's CQ completion handler
functions even if the CQ was not armed.  This can crash the system if
the function pointer is NULL.  The iSER ULP behaves this way: it
registers no completion handler and never arms the CQ for
notification.  So now we
track whether the CQ is armed at flush time and only call the
completion handlers if their CQs were armed.
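
To illustrate the mechanism, here is a minimal user-space sketch of the
arm/flush pattern, using C11 atomics as a stand-in for the kernel's
set_bit()/test_and_clear_bit(); the struct and function names below are
hypothetical, not the driver's API:

#include <stdatomic.h>
#include <stdio.h>

#define CQ_ARMED 1UL

/* Hypothetical stand-in for the driver's CQ: an armed flag plus the
 * ULP's (possibly NULL) completion handler, as with iSER. */
struct cq {
        atomic_ulong flags;
        void (*comp_handler)(struct cq *cq);
};

/* Analog of t4_arm_cq(): remember that the ULP requested notification. */
static void arm_cq(struct cq *cq)
{
        atomic_fetch_or(&cq->flags, CQ_ARMED);
}

/* Analog of t4_clear_cq_armed(): atomically consume the armed state,
 * returning nonzero only if the CQ was armed. */
static int clear_cq_armed(struct cq *cq)
{
        return (atomic_fetch_and(&cq->flags, ~CQ_ARMED) & CQ_ARMED) != 0;
}

/* The flush-time rule from this patch: call the handler only if the
 * CQ was armed, so a NULL handler is never dereferenced. */
static void flush_notify(struct cq *cq)
{
        if (clear_cq_armed(cq))
                (*cq->comp_handler)(cq);
}

static void handler(struct cq *cq)
{
        printf("completion handler fired for %p\n", (void *)cq);
}

int main(void)
{
        struct cq armed_cq = { .comp_handler = handler };
        struct cq iser_cq = { .comp_handler = NULL };   /* never armed */

        arm_cq(&armed_cq);
        flush_notify(&armed_cq);        /* fires exactly once */
        flush_notify(&armed_cq);        /* arm already consumed: no call */
        flush_notify(&iser_cq);         /* unarmed: NULL handler never touched */
        return 0;
}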

Also, if the RCQ and SCQ are the same CQ, the completion handler is
getting called twice.  It should only be called once after all SQ and
RQ WRs are flushed from the QP.  So rearrange the logic to fix this.
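
The rearranged notification logic reduces to the dispatch sketched
below, a simplified rendering of the new tail of __flush_qp() with
locking omitted; it reuses the same hypothetical C11 stand-ins as the
sketch above and is self-contained for compilation:

#include <stdatomic.h>
#include <stdio.h>

#define CQ_ARMED 1UL

struct cq {
        atomic_ulong flags;
        void (*comp_handler)(struct cq *cq);
};

static int clear_cq_armed(struct cq *cq)
{
        return (atomic_fetch_and(&cq->flags, ~CQ_ARMED) & CQ_ARMED) != 0;
}

/* Mirrors the new tail of __flush_qp(): when the SQ and RQ share one
 * CQ, test the armed state once and notify at most once; otherwise
 * each CQ gets its own armed test. */
static void notify_after_flush(struct cq *rchp, struct cq *schp,
                               int rq_flushed, int sq_flushed)
{
        if (schp == rchp) {
                /* Shared CQ: one notification covers both queues. */
                if (clear_cq_armed(rchp) && (rq_flushed || sq_flushed))
                        (*rchp->comp_handler)(rchp);
        } else {
                if (clear_cq_armed(rchp) && rq_flushed)
                        (*rchp->comp_handler)(rchp);
                if (clear_cq_armed(schp) && sq_flushed)
                        (*schp->comp_handler)(schp);
        }
}

static void handler(struct cq *cq)
{
        printf("notify %p\n", (void *)cq);
}

int main(void)
{
        struct cq shared = { .comp_handler = handler };

        atomic_fetch_or(&shared.flags, CQ_ARMED);       /* arm once */
        /* RQ and SQ were both flushed, yet the shared CQ is notified
         * exactly once, fixing the double-call described above. */
        notify_after_flush(&shared, &shared, 1, 1);
        return 0;
}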
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Parent 64aa90f2
drivers/infiniband/hw/cxgb4/ev.c
@@ -182,6 +182,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 	chp = get_chp(dev, qid);
 	if (chp) {
+		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
...
drivers/infiniband/hw/cxgb4/qp.c
@@ -1066,7 +1066,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 		       struct c4iw_cq *schp)
 {
 	int count;
-	int flushed;
+	int rq_flushed, sq_flushed;
 	unsigned long flag;
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
@@ -1084,28 +1084,41 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
-	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
 	if (schp != rchp)
 		c4iw_flush_hw_cq(schp);
-	flushed = c4iw_flush_sq(qhp);
+	sq_flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, flag);
-		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
-	}
+
+	if (schp == rchp) {
+		if (t4_clear_cq_armed(&rchp->cq) &&
+		    (rq_flushed || sq_flushed)) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+	} else {
+		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+						   schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+		}
+	}
 }
 
 static void flush_qp(struct c4iw_qp *qhp)
...
drivers/infiniband/hw/cxgb4/t4.h
@@ -531,6 +531,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 	return !wq->rq.queue[wq->rq.size].status.db_off;
 }
 
+enum t4_cq_flags {
+	CQ_ARMED	= 1,
+};
+
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
@@ -551,12 +555,19 @@ struct t4_cq {
 	u16 cidx_inc;
 	u8 gen;
 	u8 error;
+	unsigned long flags;
 };
 
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+	return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
 
+	set_bit(CQ_ARMED, &cq->flags);
 	while (cq->cidx_inc > CIDXINC_MASK) {
 		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
 		      INGRESSQID(cq->cqid);
...
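
Design note: set_bit(CQ_ARMED, ...) happens in t4_arm_cq() before the
doorbell is rung, and both the event handler (c4iw_ev_handler()) and
the flush path (__flush_qp()) consume the flag via t4_clear_cq_armed().
Because test_and_clear_bit() is an atomic read-modify-write, only one
of two racing consumers can observe the bit set, so the ULP sees at
most one notification per arm. A small user-space demonstration of
that single-consumer property, again with a hypothetical C11/pthreads
analog rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define CQ_ARMED 1UL

static atomic_ulong cq_flags;

/* C11 analog of test_and_clear_bit(): an atomic read-modify-write,
 * so only one racing caller can see the bit set. */
static int clear_cq_armed(void)
{
        return (atomic_fetch_and(&cq_flags, ~CQ_ARMED) & CQ_ARMED) != 0;
}

static void *consumer(void *name)
{
        if (clear_cq_armed())
                printf("%s consumed the arm and would notify\n",
                       (const char *)name);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        atomic_fetch_or(&cq_flags, CQ_ARMED);   /* arm once */
        /* Race an "event handler" against a "QP flush": exactly one
         * of them wins the test-and-clear and delivers the event. */
        pthread_create(&t1, NULL, consumer, "event handler");
        pthread_create(&t2, NULL, consumer, "qp flush");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}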