Commit 84b105db authored by Naresh Gottumukkala, committed by Roland Dreier

RDMA/ocrdma: Fill PVID in UMC case

In the UMC case, the driver needs to fill the PVID in the address vector
template for UD traffic.
Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Parent 38754397
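
For readers scanning the diff below, here is a minimal standalone sketch of the fallback this patch introduces in set_av_attr(): if the destination GID carries no VLAN tag, or the tag falls outside the valid 12-bit range, the port VLAN ID (PVID) cached from the firmware async event is used for the UD address vector instead. The helper and its names are hypothetical illustrations, not the driver code itself.

#include <stdint.h>

/*
 * Hypothetical helper mirroring the fallback the patch adds to set_av_attr():
 * prefer the VLAN ID derived from the destination GID; if there is none (0)
 * or it is outside the 12-bit VLAN range, fall back to the port VLAN ID
 * (PVID) that the driver caches in its device structure.
 */
static uint16_t choose_ud_vlan(uint16_t vlan_from_dgid, uint16_t pvid)
{
	if (!vlan_from_dgid || vlan_from_dgid > 0xFFF)
		return pvid;
	return vlan_from_dgid;
}

The result still passes through the existing "if (vlan_tag && (vlan_tag < 0x1000))" check, so a PVID of 0 (no port VLAN configured) simply leaves the UD frame untagged.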
@@ -170,6 +170,7 @@ struct ocrdma_dev {
 	struct rcu_head rcu;
 	int id;
 	u64 stag_arr[OCRDMA_MAX_STAG];
+	u16 pvid;
 };
 
 struct ocrdma_cq {
......
@@ -50,6 +50,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	ah->sgid_index = attr->grh.sgid_index;
 
 	vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+	if (!vlan_tag || (vlan_tag > 0xFFF))
+		vlan_tag = dev->pvid;
 	if (vlan_tag && (vlan_tag < 0x1000)) {
 		eth.eth_type = cpu_to_be16(0x8100);
 		eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
......
@@ -544,7 +544,10 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
 	cmd->cqid_pages = num_pages;
 	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
 	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
-	cmd->async_event_bitmap = Bit(20);
+
+	cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
+	cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+
 	cmd->async_cqid_ringsize = cq->id;
 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
 				OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
@@ -727,6 +730,29 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
 }
 
+static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
+				     struct ocrdma_ae_mcqe *cqe)
+{
+	struct ocrdma_ae_pvid_mcqe *evt;
+	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+			OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+	switch (type) {
+	case OCRDMA_ASYNC_EVENT_PVID_STATE:
+		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
+		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
+			OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
+			dev->pvid = ((evt->tag_enabled &
+					OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
+					OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
+		break;
+	default:
+		/* Not interested evts. */
+		break;
+	}
+}
+
 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
 {
 	/* async CQE processing */
@@ -734,8 +760,10 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
 	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
 			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
 
-	if (evt_code == OCRDMA_ASYNC_EVE_CODE)
+	if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
 		ocrdma_dispatch_ibevent(dev, cqe);
+	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+		ocrdma_process_grp5_aync(dev, cqe);
 	else
 		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
 		       dev->id, evt_code);
......
@@ -338,6 +338,20 @@ struct ocrdma_ae_mcqe {
 	u32 valid_ae_event;
 };
 
+enum {
+	OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0,
+	OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF,
+	OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16,
+	OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT
+};
+
+struct ocrdma_ae_pvid_mcqe {
+	u32 tag_enabled;
+	u32 event_tag;
+	u32 rsvd1;
+	u32 rsvd2;
+};
+
 enum {
 	OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16,
 	OCRDMA_AE_MPA_MCQE_REQ_ID_MASK = 0xFFFF <<
@@ -388,7 +402,9 @@ struct ocrdma_ae_qp_mcqe {
 	u32 valid_ae_event;
 };
 
-#define OCRDMA_ASYNC_EVE_CODE 0x14
+#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
+#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
 
 enum OCRDMA_ASYNC_EVENT_TYPE {
 	OCRDMA_CQ_ERROR = 0x00,
......
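
As a quick aside on the new OCRDMA_AE_PVID_MCQE_* layout above: the enable flag sits in the low byte of tag_enabled and the 16-bit PVID in bits 31:16. The snippet below is a standalone, illustrative decode with a made-up sample value; the shortened macro names are local stand-ins for the patch's definitions so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Local, shortened copies of the patch's shifts/masks for this sketch only. */
#define AE_PVID_ENABLED_SHIFT	0
#define AE_PVID_ENABLED_MASK	0xFFu
#define AE_PVID_TAG_SHIFT	16
#define AE_PVID_TAG_MASK	(0xFFFFu << AE_PVID_TAG_SHIFT)

int main(void)
{
	/* Made-up CQE word: PVID 100 in bits 31:16, enable flag set in the low byte. */
	uint32_t tag_enabled = (100u << AE_PVID_TAG_SHIFT) | 0x1u;

	unsigned int enabled = (tag_enabled & AE_PVID_ENABLED_MASK) >> AE_PVID_ENABLED_SHIFT;
	unsigned int pvid = (tag_enabled & AE_PVID_TAG_MASK) >> AE_PVID_TAG_SHIFT;

	printf("pvid enabled=%u tag=%u\n", enabled, pvid);	/* prints: pvid enabled=1 tag=100 */
	return 0;
}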