Commit 90f104da authored by Roland Dreier

[IB] mthca: SRQ limit reached events

Our hardware supports generating an event when the number of receives
posted to a shared receive queue (SRQ) falls below a user-specified
limit.  Implement mthca_modify_srq() to arm the limit, and add code to
handle dispatching SRQ events when they occur.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Parent 116c0074
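As background for the change, here is a minimal sketch of how a kernel consumer could arm the limit through the verbs layer. The helper name my_arm_srq_limit() and the watermark of 16 are made up for illustration; ib_modify_srq(), struct ib_srq_attr and IB_SRQ_LIMIT are the standard kernel verbs interface this patch plugs into.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical consumer helper: ask for an IB_EVENT_SRQ_LIMIT_REACHED
 * event once fewer than 16 receives remain posted to the SRQ.  The
 * event is delivered to the event_handler that was registered when the
 * SRQ was created with ib_create_srq().
 */
static int my_arm_srq_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* example watermark */
	};

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}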
@@ -448,6 +448,8 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -83,7 +83,8 @@ enum {
MTHCA_EVENT_TYPE_PATH_MIG = 0x01,
MTHCA_EVENT_TYPE_COMM_EST = 0x02,
MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03,
MTHCA_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14,
MTHCA_EVENT_TYPE_CQ_ERROR = 0x04,
MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
@@ -110,8 +111,9 @@ enum {
(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24)
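Note that the reworked MTHCA_SRQ_EVENT_MASK is now fully parenthesized. Since & binds tighter than | in C, an unparenthesized expansion can silently change meaning once the macro is combined with other expressions. A standalone illustration of the hazard (the bit positions are made up; this is not driver code):

#include <stdio.h>

#define BAD_MASK   1ULL << 3 | 1ULL << 4		/* unparenthesized */
#define GOOD_MASK  ((1ULL << 3) | (1ULL << 4))		/* parenthesized */

int main(void)
{
	unsigned long long flags = 1ULL << 3;

	/* Expands to (flags & (1ULL << 3)) | (1ULL << 4): bit 4 leaks in. */
	printf("bad:  %#llx\n", flags & BAD_MASK);	/* prints 0x18 */
	/* Tests both bits as intended. */
	printf("good: %#llx\n", flags & GOOD_MASK);	/* prints 0x8 */
	return 0;
}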
@@ -141,6 +143,9 @@ struct mthca_eqe {
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
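In both new cases the QP or SRQ number arrives big-endian from the HCA, with the 24-bit object number in the low bits of the 32-bit EQE field, which is why the handlers mask with 0xffffff after be32_to_cpu(). A hypothetical helper (not part of the driver) that just makes the extraction explicit:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper: pull the 24-bit QP/SRQ number out of a
 * big-endian event queue entry field. */
static inline u32 eqe_obj_number(__be32 raw)
{
	return be32_to_cpu(raw) & 0xffffff;
}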
@@ -1084,6 +1084,7 @@ int mthca_register_device(struct mthca_dev *dev)
	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq  = mthca_create_srq;
		dev->ib_dev.modify_srq  = mthca_modify_srq;
		dev->ib_dev.destroy_srq = mthca_destroy_srq;

		if (mthca_is_memfree(dev))
@@ -332,6 +332,29 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
@@ -354,7 +377,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
out:
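mthca_srq_event() above hands the event to the handler that the consumer registered when it created the SRQ. Since the limit is generally disarmed once the event has fired, a typical handler reposts receives and then re-arms. A minimal sketch of that pattern; repost_receives(), my_srq_event_handler() and the watermark of 16 are assumptions, not part of this commit:

#include <rdma/ib_verbs.h>

void repost_receives(struct ib_srq *srq);	/* assumed consumer helper */

/* Hypothetical SRQ event handler (registered via ib_create_srq()):
 * refill the queue, then re-arm the limit for the next low-water event. */
static void my_srq_event_handler(struct ib_event *event, void *srq_context)
{
	struct ib_srq_attr attr = { .srq_limit = 16 };	/* example watermark */

	if (event->event != IB_EVENT_SRQ_LIMIT_REACHED)
		return;

	repost_receives(event->element.srq);
	/* re-arm the limit; error handling omitted in this sketch */
	ib_modify_srq(event->element.srq, &attr, IB_SRQ_LIMIT);
}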