Commit 926a01dc authored by Wei Hu(Xavier), committed by Doug Ledford

RDMA/hns: Add QP operations support for hip08 SoC

This patch implements QP operations for the hip08 RoCE driver and
fixes some checkpatch warnings about print messages in the QP functions.
The QP operations include create QP, query QP, modify QP and
destroy QP.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent 93aa2187
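For context, the verbs exercised by this commit's new create/modify/query/destroy QP paths can be driven from user space through the standard libibverbs API. The following is a minimal sketch, not part of this commit, assuming the first available RDMA device is the hip08 port; error handling is omitted for brevity:

```c
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_cq *cq;
	struct ibv_qp *qp;
	struct ibv_qp_init_attr init_attr = {};
	struct ibv_qp_init_attr out_init = {};
	struct ibv_qp_attr attr = {};

	if (!dev_list || !dev_list[0])
		return 1;

	ctx = ibv_open_device(dev_list[0]);
	pd = ibv_alloc_pd(ctx);
	cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);

	/* Create QP: reaches the driver's create QP path */
	init_attr.send_cq = cq;
	init_attr.recv_cq = cq;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.cap.max_send_wr = 16;
	init_attr.cap.max_recv_wr = 16;
	init_attr.cap.max_send_sge = 1;
	init_attr.cap.max_recv_sge = 1;
	qp = ibv_create_qp(pd, &init_attr);

	/* Modify QP RESET -> INIT: exercises the modify QP hook */
	attr.qp_state = IBV_QPS_INIT;
	attr.pkey_index = 0;
	attr.port_num = 1;
	attr.qp_access_flags = 0;
	ibv_modify_qp(qp, &attr,
		      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
		      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);

	/* Query the current state, then tear the QP down */
	ibv_query_qp(qp, &attr, IBV_QP_STATE, &out_init);
	printf("qp state = %d\n", attr.qp_state);

	ibv_destroy_qp(qp);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}
```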
......@@ -48,6 +48,10 @@ enum {
HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
/* QPC operation */
HNS_ROCE_CMD_MODIFY_QPC = 0x41,
HNS_ROCE_CMD_QUERY_QPC = 0x42,
/* CQC BT commands */
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
......
......@@ -304,6 +304,12 @@ struct hns_roce_wq {
void __iomem *db_reg_l;
};
struct hns_roce_sge {
int sge_cnt; /* SGE num */
int offset;
int sge_shift;/* SGE size */
};
struct hns_roce_buf_list {
void *buf;
dma_addr_t map;
......@@ -455,6 +461,9 @@ struct hns_roce_qp {
atomic_t refcount;
struct completion free;
struct hns_roce_sge sge;
u32 next_sge;
};
struct hns_roce_sqp {
......@@ -509,6 +518,7 @@ struct hns_roce_caps {
int num_cqs;
int max_cqes;
int min_cqes;
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
int num_comp_vectors; /* 32 ceq */
......@@ -788,6 +798,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
......
......@@ -1475,6 +1475,7 @@ int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
......
......@@ -104,6 +104,9 @@
#define V2_CQ_DB_REQ_NOT 1
#define V2_CQ_STATE_VALID 1
#define V2_QKEY_VAL 0x80010000
#define GID_LEN_V2 16
#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
......@@ -262,6 +265,453 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
enum{
V2_MPT_ST_VALID = 0x1,
};
enum hns_roce_v2_qp_state {
HNS_ROCE_QP_ST_RST,
HNS_ROCE_QP_ST_INIT,
HNS_ROCE_QP_ST_RTR,
HNS_ROCE_QP_ST_RTS,
HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_SQD,
HNS_ROCE_QP_ST_ERR,
HNS_ROCE_QP_ST_SQ_DRAINING,
HNS_ROCE_QP_NUM_ST
};
struct hns_roce_v2_qp_context {
u32 byte_4_sqpn_tst;
u32 wqe_sge_ba;
u32 byte_12_sq_hop;
u32 byte_16_buf_ba_pg_sz;
u32 byte_20_smac_sgid_idx;
u32 byte_24_mtu_tc;
u32 byte_28_at_fl;
u8 dgid[GID_LEN_V2];
u32 dmac;
u32 byte_52_udpspn_dmac;
u32 byte_56_dqpn_err;
u32 byte_60_qpst_mapid;
u32 qkey_xrcd;
u32 byte_68_rq_db;
u32 rq_db_record_addr;
u32 byte_76_srqn_op_en;
u32 byte_80_rnr_rx_cqn;
u32 byte_84_rq_ci_pi;
u32 rq_cur_blk_addr;
u32 byte_92_srq_info;
u32 byte_96_rx_reqmsn;
u32 rq_nxt_blk_addr;
u32 byte_104_rq_sge;
u32 byte_108_rx_reqepsn;
u32 rq_rnr_timer;
u32 rx_msg_len;
u32 rx_rkey_pkt_info;
u64 rx_va;
u32 byte_132_trrl;
u32 trrl_ba;
u32 byte_140_raq;
u32 byte_144_raq;
u32 byte_148_raq;
u32 byte_152_raq;
u32 byte_156_raq;
u32 byte_160_sq_ci_pi;
u32 sq_cur_blk_addr;
u32 byte_168_irrl_idx;
u32 byte_172_sq_psn;
u32 byte_176_msg_pktn;
u32 sq_cur_sqe_blk_addr;
u32 byte_184_irrl_idx;
u32 cur_sge_offset;
u32 byte_192_ext_sge;
u32 byte_196_sq_psn;
u32 byte_200_sq_max;
u32 irrl_ba;
u32 byte_208_irrl;
u32 byte_212_lsn;
u32 sq_timer;
u32 byte_220_retry_psn_msn;
u32 byte_224_retry_msg;
u32 rx_sq_cur_blk_addr;
u32 byte_232_irrl_sge;
u32 irrl_cur_sge_offset;
u32 byte_240_irrl_tail;
u32 byte_244_rnr_rxack;
u32 byte_248_ack_psn;
u32 byte_252_err_txcqn;
u32 byte_256_sqflush_rqcqe;
};
#define V2_QPC_BYTE_4_TST_S 0
#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
#define V2_QPC_BYTE_4_SGE_SHIFT_S 3
#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
#define V2_QPC_BYTE_4_SQPN_S 8
#define V2_QPC_BYTE_4_SQPN_M GENMASK(31, 8)
#define V2_QPC_BYTE_12_WQE_SGE_BA_S 0
#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
#define V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
#define V2_QPC_BYTE_16_PD_S 8
#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
#define V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
#define V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
#define V2_QPC_BYTE_20_RQWS_S 4
#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
#define V2_QPC_BYTE_20_SQ_SHIFT_S 8
#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
#define V2_QPC_BYTE_20_RQ_SHIFT_S 12
#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
#define V2_QPC_BYTE_20_SGID_IDX_S 16
#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
#define V2_QPC_BYTE_20_SMAC_IDX_S 24
#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
#define V2_QPC_BYTE_24_HOP_LIMIT_S 0
#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
#define V2_QPC_BYTE_24_TC_S 8
#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
#define V2_QPC_BYTE_24_VLAN_IDX_S 16
#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
#define V2_QPC_BYTE_24_MTU_S 28
#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
#define V2_QPC_BYTE_28_FL_S 0
#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
#define V2_QPC_BYTE_28_SL_S 20
#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
#define V2_QPC_BYTE_28_CE_FLAG_S 25
#define V2_QPC_BYTE_28_LBI_S 26
#define V2_QPC_BYTE_28_AT_S 27
#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
#define V2_QPC_BYTE_52_DMAC_S 0
#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
#define V2_QPC_BYTE_52_UDPSPN_S 16
#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
#define V2_QPC_BYTE_56_DQPN_S 0
#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
#define V2_QPC_BYTE_56_SQ_TX_ERR_S 24
#define V2_QPC_BYTE_56_SQ_RX_ERR_S 25
#define V2_QPC_BYTE_56_RQ_TX_ERR_S 26
#define V2_QPC_BYTE_56_RQ_RX_ERR_S 27
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
#define V2_QPC_BYTE_60_MAPID_S 0
#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
#define V2_QPC_BYTE_60_TEMPID_S 16
#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
#define V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
#define V2_QPC_BYTE_76_SRQN_S 0
#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_76_SRQ_EN_S 24
#define V2_QPC_BYTE_76_RRE_S 25
#define V2_QPC_BYTE_76_RWE_S 26
#define V2_QPC_BYTE_76_ATE_S 27
#define V2_QPC_BYTE_76_RQIE_S 28
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_92_SRQ_INFO_S 20
#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
#define V2_QPC_BYTE_96_RX_REQ_MSN_S 0
#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
#define V2_QPC_BYTE_108_INV_CREDIT_S 0
#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
#define V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
#define V2_QPC_BYTE_132_TRRL_BA_S 16
#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
#define V2_QPC_BYTE_140_TRRL_BA_S 0
#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
#define V2_QPC_BYTE_148_RQ_MSN_S 0
#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
#define V2_QPC_BYTE_152_RAQ_PSN_S 8
#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
#define V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 21
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 21)
#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
#define V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
#define V2_QPC_BYTE_172_FRE_S 7
#define V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
#define V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
#define V2_QPC_BYTE_196_IRRL_HEAD_S 0
#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
#define V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
#define V2_QPC_BYTE_208_IRRL_BA_S 0
#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
#define V2_QPC_BYTE_208_RMT_E2E_S 28
#define V2_QPC_BYTE_208_SR_MAX_S 29
#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
#define V2_QPC_BYTE_212_LSN_S 0
#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
#define V2_QPC_BYTE_212_CHECK_FLG_S 27
#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
#define V2_QPC_BYTE_212_RETRY_CNT_S 29
#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
#define V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
#define V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
#define V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
#define V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
#define V2_QPC_BYTE_240_RX_ACK_MSN_S 16
#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
#define V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
#define V2_QPC_BYTE_252_TX_CQN_S 0
#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_252_SIG_TYPE_S 24
#define V2_QPC_BYTE_252_ERR_TYPE_S 25
#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
#define V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
struct hns_roce_v2_cqe {
u32 byte_4;
u32 rkey_immtdata;
......
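The `_S`/`_M` pairs above give bit offsets and `GENMASK()` masks for fields packed into the 32-bit words of `struct hns_roce_v2_qp_context`. As a standalone illustration of how such a shift/mask pair is applied, here is a small sketch; `set_field()`/`get_field()` are local demonstration helpers, not the driver's own field-access macros:

```c
#include <stdint.h>
#include <stdio.h>

/* Same values as the driver's defines: GENMASK(2, 0) and GENMASK(31, 8) */
#define V2_QPC_BYTE_4_TST_S  0
#define V2_QPC_BYTE_4_TST_M  (0x7u << 0)
#define V2_QPC_BYTE_4_SQPN_S 8
#define V2_QPC_BYTE_4_SQPN_M (0xffffffu << 8)

/* Pack a value into a word under the given mask/shift */
static void set_field(uint32_t *word, uint32_t mask, int shift, uint32_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

/* Extract a value from a word under the given mask/shift */
static uint32_t get_field(uint32_t word, uint32_t mask, int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t byte_4_sqpn_tst = 0;

	set_field(&byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		  V2_QPC_BYTE_4_TST_S, 3);		/* service type */
	set_field(&byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		  V2_QPC_BYTE_4_SQPN_S, 0x1234);	/* QP number */

	printf("tst=%u sqpn=0x%x\n",
	       get_field(byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
			 V2_QPC_BYTE_4_TST_S),
	       get_field(byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
			 V2_QPC_BYTE_4_SQPN_S));
	return 0;
}
```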
......@@ -286,20 +286,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
/* In v1 engine, parameter verification procession */
max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
else
max_cnt = cap->max_recv_wr;
hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
return -EINVAL;
}
max_cnt = max(1U, cap->max_recv_sge);
hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
/* WQE is fixed for 64B */
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
if (hr_dev->caps.max_rq_sg <= 2)
hr_qp->rq.wqe_shift =
ilog2(hr_dev->caps.max_rq_desc_sz);
else
hr_qp->rq.wqe_shift =
ilog2(hr_dev->caps.max_rq_desc_sz
* hr_qp->rq.max_gs);
}
cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
......@@ -309,11 +316,13 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
u32 max_cnt;
/* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
......@@ -323,18 +332,61 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
cap->max_send_sge);
return -EINVAL;
}
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
if (hr_dev->caps.max_sq_sg <= 2)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
hr_qp->sge.sge_shift = 4;
/* Get buf size, SQ and RQ are aligned to page_szie */
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
if (hr_dev->caps.max_sq_sg <= 2) {
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.offset = 0;
hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), PAGE_SIZE) +
HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
if (hr_qp->sge.sge_cnt) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
PAGE_SIZE);
hr_qp->rq.offset = hr_qp->sge.offset +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift),
PAGE_SIZE);
} else {
hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
PAGE_SIZE);
}
}
return 0;
}
......@@ -345,11 +397,12 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
{
struct device *dev = hr_dev->dev;
u32 max_cnt;
int size;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
dev_err(dev, "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
......@@ -357,27 +410,45 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq_max_wqes_per_wr = 1;
hr_qp->sq_spare_wqes = 0;
/* In v1 engine, parameter verification procession */
max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
else
max_cnt = cap->max_send_wr;
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
if (hr_dev->caps.max_sq_sg <= 2)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
/* Get buf size, SQ and RQ are aligned to page_szie */
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
if (hr_qp->sq.max_gs > 2) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
hr_qp->sge.sge_shift = 4;
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
hr_qp->sq.offset = 0;
hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
PAGE_SIZE);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, PAGE_SIZE);
}
hr_qp->rq.offset = size;
size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
PAGE_SIZE);
hr_qp->buff_size = size;
/* Get wr and sge number which send */
cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
......@@ -425,7 +496,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
goto err_out;
......@@ -528,7 +600,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
if ((init_attr->qp_type) == IB_QPT_GSI) {
if (init_attr->qp_type == IB_QPT_GSI &&
hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
/* In v1 engine, GSI QP context in RoCE engine's register */
ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_alloc failed!\n");
......@@ -700,7 +774,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
if (attr->path_mtu > IB_MTU_2048 ||
if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
attr->path_mtu > IB_MTU_4096) ||
(hr_dev->caps.max_mtu == IB_MTU_2048 &&
attr->path_mtu > IB_MTU_2048) ||
attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
......@@ -724,9 +801,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
ret = -EPERM;
dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
new_state);
ret = 0;
goto out;
}
......@@ -804,6 +879,13 @@ void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
}
EXPORT_SYMBOL_GPL(get_send_wqe);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq)
{
......
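For readers following the buffer-layout arithmetic in the `hns_roce_set_user_sq_size()` and `hns_roce_set_kernel_sq_size()` hunks above: when more than two send SGEs are requested, the QP buffer is laid out as SQ, then an extended SGE region of `wqe_cnt * (max_gs - 2)` sixteen-byte entries rounded up to a power of two, then the RQ, with each region aligned up to PAGE_SIZE. The following is a standalone sketch of that arithmetic, not driver code; `round_up_pow2()` and `align_up()` are local stand-ins and the example sizes are illustrative:

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int round_up_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int sq_wqe_cnt = 64;	/* already a power of two */
	unsigned int rq_wqe_cnt = 64;
	unsigned int sq_wqe_shift = 6;	/* 64-byte send WQE */
	unsigned int rq_wqe_shift = 4;	/* example receive WQE stride */
	unsigned int max_gs = 4;	/* > 2, so extended SGEs are needed */
	unsigned int sge_shift = 4;	/* each extended SGE is 16 bytes */
	unsigned int sge_cnt, sq_size, sge_size, rq_off, buff_size;

	/* SGEs beyond the first two spill into the extended SGE region */
	sge_cnt = round_up_pow2(sq_wqe_cnt * (max_gs - 2));

	sq_size  = align_up(sq_wqe_cnt << sq_wqe_shift, PAGE_SIZE);
	sge_size = align_up(sge_cnt << sge_shift, PAGE_SIZE);
	rq_off   = sq_size + sge_size;	/* sq.offset = 0, sge.offset = sq_size */
	buff_size = rq_off + align_up(rq_wqe_cnt << rq_wqe_shift, PAGE_SIZE);

	printf("sge_cnt=%u sge.offset=%u rq.offset=%u buff_size=%u\n",
	       sge_cnt, sq_size, rq_off, buff_size);
	return 0;
}
```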